diff --git a/.gitignore b/.gitignore index d5f4804ed07c..162bd2b67bdf 100644 --- a/.gitignore +++ b/.gitignore @@ -44,6 +44,7 @@ *.tab.[ch] *.tar *.xz +*.zst Module.symvers modules.builtin modules.order diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index fb95fad81c79..8c0d045f1dc7 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -3079,6 +3079,8 @@ no5lvl [X86-64] Disable 5-level paging mode. Forces kernel to use 4-level paging instead. + nofsgsbase [X86] Disables FSGSBASE instructions. + no_console_suspend [HW] Never suspend the console Disable suspending of consoles during suspend and diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst index d46d5b7013c6..4b7c496199ca 100644 --- a/Documentation/admin-guide/sysctl/vm.rst +++ b/Documentation/admin-guide/sysctl/vm.rst @@ -119,6 +119,21 @@ all zones are compacted such that free memory is available in contiguous blocks where possible. This can be important for example in the allocation of huge pages although processes will also directly compact memory as required. +compaction_proactiveness +======================== + +This tunable takes a value in the range [0, 100] with a default value of +20. This tunable determines how aggressively compaction is done in the +background. Setting it to 0 disables proactive compaction. + +Note that compaction has a non-trivial system-wide impact as pages +belonging to different processes are moved around, which could also lead +to latency spikes in unsuspecting applications. The kernel employs +various heuristics to avoid wasting CPU cycles if it detects that +proactive compaction is not being effective. + +Be careful when setting it to extreme values like 100, as that may +cause excessive background compaction activity. compact_unevictable_allowed =========================== diff --git a/Documentation/block/bfq-iosched.rst b/Documentation/block/bfq-iosched.rst index 19d4d1570cee..b9fbac0a6b7c 100644 --- a/Documentation/block/bfq-iosched.rst +++ b/Documentation/block/bfq-iosched.rst @@ -574,7 +574,7 @@ applications. Unset this tunable if you need/want to control weights. Scheduler", Proceedings of the First Workshop on Mobile System Technologies (MST-2015), May 2015. - http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf + https://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf [2] P. Valente and M. Andreolini, "Improving Application @@ -584,7 +584,7 @@ applications. Unset this tunable if you need/want to control weights. 
Slightly extended version: - http://algogroup.unimore.it/people/paolo/disk_sched/bfq-v1-suite-results.pdf + https://algogroup.unimore.it/people/paolo/disk_sched/bfq-v1-suite-results.pdf [3] https://github.com/Algodev-github/S diff --git a/Documentation/dontdiff b/Documentation/dontdiff index ef9519c32c55..e361fc95ca29 100644 --- a/Documentation/dontdiff +++ b/Documentation/dontdiff @@ -55,6 +55,7 @@ *.ver *.xml *.xz +*.zst *_MODULES *_vga16.c *~ diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst index 996f3cfe7030..5b781a014e3e 100644 --- a/Documentation/filesystems/proc.rst +++ b/Documentation/filesystems/proc.rst @@ -47,6 +47,7 @@ fixes/update part 1.1 Stefani Seibold June 9 2009 3.10 /proc/<pid>/timerslack_ns - Task timerslack value 3.11 /proc/<pid>/patch_state - Livepatch patch operation state 3.12 /proc/<pid>/arch_status - Task architecture specific information + 3.13 /proc/<pid>/ksm - Remote KSM 4 Configuring procfs 4.1 Mount options @@ -2134,6 +2135,19 @@ AVX512_elapsed_ms: the task is unlikely an AVX512 user, but depends on the workload and the scheduling scenario, it also could be a false negative mentioned above. +3.13 /proc/<pid>/ksm - Remote KSM +------------------------------------ +This write-only file allows marking the memory of another task for merging +and unmerging via KSM. + +The following actions are available: + + * mark the task's memory as mergeable: + # echo merge > /proc/<pid>/ksm + + * unmerge all the task's memory: + # echo unmerge > /proc/<pid>/ksm + Configuring procfs ------------------ diff --git a/Documentation/x86/boot.rst b/Documentation/x86/boot.rst index 5325c71ca877..7fafc7ac00d7 100644 --- a/Documentation/x86/boot.rst +++ b/Documentation/x86/boot.rst @@ -782,9 +782,9 @@ Protocol: 2.08+ uncompressed data should be determined using the standard magic numbers. The currently supported compression formats are gzip (magic numbers 1F 8B or 1F 9E), bzip2 (magic number 42 5A), LZMA - (magic number 5D 00), XZ (magic number FD 37), and LZ4 (magic number - 02 21). The uncompressed payload is currently always ELF (magic - number 7F 45 4C 46). + (magic number 5D 00), XZ (magic number FD 37), LZ4 (magic number + 02 21) and ZSTD (magic number 28 B5). The uncompressed payload is + currently always ELF (magic number 7F 45 4C 46). ============ ============== Field name: payload_length diff --git a/Documentation/x86/x86_64/fsgs.rst b/Documentation/x86/x86_64/fsgs.rst new file mode 100644 index 000000000000..50960e09e1f6 --- /dev/null +++ b/Documentation/x86/x86_64/fsgs.rst @@ -0,0 +1,199 @@ +.. SPDX-License-Identifier: GPL-2.0 + +Using FS and GS segments in user space applications +=================================================== + +The x86 architecture supports segmentation. Instructions which access +memory can use segment register based addressing mode. The following +notation is used to address a byte within a segment: + + Segment-register:Byte-address + +The segment base address is added to the Byte-address to compute the +resulting virtual address which is accessed. This allows accessing multiple +instances of data with the identical Byte-address, i.e. the same code. The +selection of a particular instance is purely based on the base-address in +the segment register. + +In 32-bit mode the CPU provides 6 segments, which also support segment +limits. The limits can be used to enforce address space protections. + +In 64-bit mode the CS/SS/DS/ES segments are ignored and the base address is +always 0 to provide a full 64bit address space.
The FS and GS segments are +still functional in 64-bit mode. + +Common FS and GS usage +------------------------------ + +The FS segment is commonly used to address Thread Local Storage (TLS). FS +is usually managed by runtime code or a threading library. Variables +declared with the '__thread' storage class specifier are instantiated per +thread and the compiler emits the FS: address prefix for accesses to these +variables. Each thread has its own FS base address so common code can be +used without complex address offset calculations to access the per thread +instances. Applications should not use FS for other purposes when they use +runtimes or threading libraries which manage the per thread FS. + +The GS segment has no common use and can be used freely by +applications. GCC and Clang support GS based addressing via address space +identifiers. + +Reading and writing the FS/GS base address +------------------------------------------ + +There exist two mechanisms to read and write the FS/GS base address: + + - the arch_prctl() system call + + - the FSGSBASE instruction family + +Accessing FS/GS base with arch_prctl() +-------------------------------------- + + The arch_prctl(2) based mechanism is available on all 64-bit CPUs and all + kernel versions. + + Reading the base: + + arch_prctl(ARCH_GET_FS, &fsbase); + arch_prctl(ARCH_GET_GS, &gsbase); + + Writing the base: + + arch_prctl(ARCH_SET_FS, fsbase); + arch_prctl(ARCH_SET_GS, gsbase); + + The ARCH_SET_GS prctl may be disabled depending on kernel configuration + and security settings. + +Accessing FS/GS base with the FSGSBASE instructions +--------------------------------------------------- + + With the Ivy Bridge CPU generation Intel introduced a new set of + instructions to access the FS and GS base registers directly from user + space. These instructions are also supported on AMD Family 17H CPUs. The + following instructions are available: + + =============== =========================== + RDFSBASE %reg Read the FS base register + RDGSBASE %reg Read the GS base register + WRFSBASE %reg Write the FS base register + WRGSBASE %reg Write the GS base register + =============== =========================== + + The instructions avoid the overhead of the arch_prctl() syscall and allow + more flexible usage of the FS/GS addressing modes in user space + applications. This does not prevent conflicts between threading libraries + and runtimes which utilize FS and applications which want to use it for + their own purpose. + +FSGSBASE instructions enablement +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + The instructions are enumerated in CPUID leaf 7, bit 0 of EBX. If + available /proc/cpuinfo shows 'fsgsbase' in the flag entry of the CPUs. + + The availability of the instructions does not enable them + automatically. The kernel has to enable them explicitly in CR4. The + reason for this is that older kernels make assumptions about the values in + the GS register and enforce them when GS base is set via + arch_prctl(). Allowing user space to write arbitrary values to GS base + would violate these assumptions and cause malfunction. + + On kernels which do not enable FSGSBASE the execution of the FSGSBASE + instructions will fault with a #UD exception. + + The kernel provides reliable information about the enabled state in the + ELF AUX vector. If the HWCAP2_FSGSBASE bit is set in the AUX vector, the + kernel has FSGSBASE instructions enabled and applications can use them. 
+ The following code example shows how this detection works:: + + #include <sys/auxv.h> + #include <elf.h> + + /* Will be eventually in asm/hwcap.h */ + #ifndef HWCAP2_FSGSBASE + #define HWCAP2_FSGSBASE (1 << 1) + #endif + + .... + + unsigned val = getauxval(AT_HWCAP2); + + if (val & HWCAP2_FSGSBASE) + printf("FSGSBASE enabled\n"); + +FSGSBASE instructions compiler support +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +GCC version 4.6.4 and newer provide intrinsics for the FSGSBASE +instructions. Clang 5 supports them as well. + + =================== =========================== + _readfsbase_u64() Read the FS base register + _readgsbase_u64() Read the GS base register + _writefsbase_u64() Write the FS base register + _writegsbase_u64() Write the GS base register + =================== =========================== + +To utilize these intrinsics, <immintrin.h> must be included in the source +code and the compiler option -mfsgsbase has to be added. + +Compiler support for FS/GS based addressing +------------------------------------------- + +GCC version 6 and newer provide support for FS/GS based addressing via +Named Address Spaces. GCC implements the following address space +identifiers for x86: + + ========= ==================================== + __seg_fs Variable is addressed relative to FS + __seg_gs Variable is addressed relative to GS + ========= ==================================== + +The preprocessor symbols __SEG_FS and __SEG_GS are defined when these +address spaces are supported. Code which implements fallback modes should +check whether these symbols are defined. Usage example:: + + #ifdef __SEG_GS + + long data0 = 0; + long data1 = 1; + + long __seg_gs *ptr; + + /* Check whether FSGSBASE is enabled by the kernel (HWCAP2_FSGSBASE) */ + .... + + /* Set GS base to point to data0 */ + _writegsbase_u64(&data0); + + /* Access offset 0 of GS */ + ptr = 0; + printf("data0 = %ld\n", *ptr); + + /* Set GS base to point to data1 */ + _writegsbase_u64(&data1); + /* ptr still addresses offset 0!
*/ + printf("data1 = %ld\n", *ptr); + + +Clang does not provide the GCC address space identifiers, but it provides +address spaces via an attribute based mechanism in Clang 2.6 and newer +versions: + + ==================================== ===================================== + __attribute__((address_space(256)) Variable is addressed relative to GS + __attribute__((address_space(257)) Variable is addressed relative to FS + ==================================== ===================================== + +FS/GS based addressing with inline assembly +------------------------------------------- + +In case the compiler does not support address spaces, inline assembly can +be used for FS/GS based addressing mode:: + + mov %fs:offset, %reg + mov %gs:offset, %reg + + mov %reg, %fs:offset + mov %reg, %gs:offset diff --git a/Documentation/x86/x86_64/index.rst b/Documentation/x86/x86_64/index.rst index d6eaaa5a35fc..a56070fc8e77 100644 --- a/Documentation/x86/x86_64/index.rst +++ b/Documentation/x86/x86_64/index.rst @@ -14,3 +14,4 @@ x86_64 Support fake-numa-for-cpusets cpu-hotplug-spec machinecheck + fsgs diff --git a/Makefile b/Makefile index 24a4c1b97bb0..9a609a3d7af1 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 8 SUBLEVEL = 0 -EXTRAVERSION = +EXTRAVERSION = -pf1 NAME = Kleptomaniac Octopus # *DOCUMENTATION* @@ -464,6 +464,7 @@ KLZOP = lzop LZMA = lzma LZ4 = lz4c XZ = xz +ZSTD = zstd CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \ -Wbitwise -Wno-return-void -Wno-unknown-attribute $(CF) @@ -512,7 +513,7 @@ CLANG_FLAGS := export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF PAHOLE LEX YACC AWK INSTALLKERNEL export PERL PYTHON PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX -export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ +export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 883da0abf779..4a64395bc35d 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -188,6 +188,7 @@ config X86 select HAVE_KERNEL_LZMA select HAVE_KERNEL_LZO select HAVE_KERNEL_XZ + select HAVE_KERNEL_ZSTD select HAVE_KPROBES select HAVE_KPROBES_ON_FTRACE select HAVE_FUNCTION_ERROR_INJECTION diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index 814fe0d349b0..7b08e87fe797 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu @@ -123,6 +123,7 @@ config MPENTIUMM config MPENTIUM4 bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon" depends on X86_32 + select X86_P6_NOP help Select this for Intel Pentium 4 chips. This includes the Pentium 4, Pentium D, P4-based Celeron and Xeon, and @@ -155,9 +156,8 @@ config MPENTIUM4 -Paxville -Dempsey - config MK6 - bool "K6/K6-II/K6-III" + bool "AMD K6/K6-II/K6-III" depends on X86_32 help Select this for an AMD K6-family processor. Enables use of @@ -165,7 +165,7 @@ config MK6 flags to GCC. config MK7 - bool "Athlon/Duron/K7" + bool "AMD Athlon/Duron/K7" depends on X86_32 help Select this for an AMD Athlon K7-family processor. Enables use of @@ -173,12 +173,90 @@ config MK7 flags to GCC. config MK8 - bool "Opteron/Athlon64/Hammer/K8" + bool "AMD Opteron/Athlon64/Hammer/K8" help Select this for an AMD Opteron or Athlon64 Hammer-family processor. Enables use of some extended instructions, and passes appropriate optimization flags to GCC. 
+config MK8SSE3 + bool "AMD Opteron/Athlon64/Hammer/K8 with SSE3" + help + Select this for improved AMD Opteron or Athlon64 Hammer-family processors. + Enables use of some extended instructions, and passes appropriate + optimization flags to GCC. + +config MK10 + bool "AMD 61xx/7x50/PhenomX3/X4/II/K10" + help + Select this for an AMD 61xx Eight-Core Magny-Cours, Athlon X2 7x50, + Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor. + Enables use of some extended instructions, and passes appropriate + optimization flags to GCC. + +config MBARCELONA + bool "AMD Barcelona" + help + Select this for AMD Family 10h Barcelona processors. + + Enables -march=barcelona + +config MBOBCAT + bool "AMD Bobcat" + help + Select this for AMD Family 14h Bobcat processors. + + Enables -march=btver1 + +config MJAGUAR + bool "AMD Jaguar" + help + Select this for AMD Family 16h Jaguar processors. + + Enables -march=btver2 + +config MBULLDOZER + bool "AMD Bulldozer" + help + Select this for AMD Family 15h Bulldozer processors. + + Enables -march=bdver1 + +config MPILEDRIVER + bool "AMD Piledriver" + help + Select this for AMD Family 15h Piledriver processors. + + Enables -march=bdver2 + +config MSTEAMROLLER + bool "AMD Steamroller" + help + Select this for AMD Family 15h Steamroller processors. + + Enables -march=bdver3 + +config MEXCAVATOR + bool "AMD Excavator" + help + Select this for AMD Family 15h Excavator processors. + + Enables -march=bdver4 + +config MZEN + bool "AMD Zen" + help + Select this for AMD Family 17h Zen processors. + + Enables -march=znver1 + +config MZEN2 + bool "AMD Zen 2" + help + Select this for AMD Family 17h Zen 2 processors. + + Enables -march=znver2 + config MCRUSOE bool "Crusoe" depends on X86_32 @@ -260,6 +338,7 @@ config MVIAC7 config MPSC bool "Intel P4 / older Netburst based Xeon" + select X86_P6_NOP depends on X86_64 help Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey @@ -269,8 +348,19 @@ config MPSC using the cpu family field in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one. +config MATOM + bool "Intel Atom" + select X86_P6_NOP + help + + Select this for the Intel Atom platform. Intel Atom CPUs have an + in-order pipelining architecture and thus can benefit from + accordingly optimized code. Use a recent GCC with specific Atom + support in order to fully benefit from selecting this option. + config MCORE2 - bool "Core 2/newer Xeon" + bool "Intel Core 2" + select X86_P6_NOP help Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and @@ -278,14 +368,151 @@ config MCORE2 family in /proc/cpuinfo. Newer ones have 6 and older ones 15 (not a typo) -config MATOM - bool "Intel Atom" + Enables -march=core2 + +config MNEHALEM + bool "Intel Nehalem" + select X86_P6_NOP help - Select this for the Intel Atom platform. Intel Atom CPUs have an - in-order pipelining architecture and thus can benefit from - accordingly optimized code. Use a recent GCC with specific Atom - support in order to fully benefit from selecting this option. + Select this for 1st Gen Core processors in the Nehalem family. + + Enables -march=nehalem + +config MWESTMERE + bool "Intel Westmere" + select X86_P6_NOP + help + + Select this for the Intel Westmere formerly Nehalem-C family. + + Enables -march=westmere + +config MSILVERMONT + bool "Intel Silvermont" + select X86_P6_NOP + help + + Select this for the Intel Silvermont platform. 
+ + Enables -march=silvermont + +config MGOLDMONT + bool "Intel Goldmont" + select X86_P6_NOP + help + + Select this for the Intel Goldmont platform including Apollo Lake and Denverton. + + Enables -march=goldmont + +config MGOLDMONTPLUS + bool "Intel Goldmont Plus" + select X86_P6_NOP + help + + Select this for the Intel Goldmont Plus platform including Gemini Lake. + + Enables -march=goldmont-plus + +config MSANDYBRIDGE + bool "Intel Sandy Bridge" + select X86_P6_NOP + help + + Select this for 2nd Gen Core processors in the Sandy Bridge family. + + Enables -march=sandybridge + +config MIVYBRIDGE + bool "Intel Ivy Bridge" + select X86_P6_NOP + help + + Select this for 3rd Gen Core processors in the Ivy Bridge family. + + Enables -march=ivybridge + +config MHASWELL + bool "Intel Haswell" + select X86_P6_NOP + help + + Select this for 4th Gen Core processors in the Haswell family. + + Enables -march=haswell + +config MBROADWELL + bool "Intel Broadwell" + select X86_P6_NOP + help + + Select this for 5th Gen Core processors in the Broadwell family. + + Enables -march=broadwell + +config MSKYLAKE + bool "Intel Skylake" + select X86_P6_NOP + help + + Select this for 6th Gen Core processors in the Skylake family. + + Enables -march=skylake + +config MSKYLAKEX + bool "Intel Skylake X" + select X86_P6_NOP + help + + Select this for 6th Gen Core processors in the Skylake X family. + + Enables -march=skylake-avx512 + +config MCANNONLAKE + bool "Intel Cannon Lake" + select X86_P6_NOP + help + + Select this for 8th Gen Core processors + + Enables -march=cannonlake + +config MICELAKE + bool "Intel Ice Lake" + select X86_P6_NOP + help + + Select this for 10th Gen Core processors in the Ice Lake family. + + Enables -march=icelake-client + +config MCASCADELAKE + bool "Intel Cascade Lake" + select X86_P6_NOP + help + + Select this for Xeon processors in the Cascade Lake family. + + Enables -march=cascadelake + +config MCOOPERLAKE + bool "Intel Cooper Lake" + select X86_P6_NOP + help + + Select this for Xeon processors in the Cooper Lake family. + + Enables -march=cooperlake + +config MTIGERLAKE + bool "Intel Tiger Lake" + select X86_P6_NOP + help + + Select this for third-generation 10 nm process processors in the Tiger Lake family. + + Enables -march=tigerlake config GENERIC_CPU bool "Generic-x86-64" @@ -294,6 +521,19 @@ config GENERIC_CPU Generic x86-64 CPU. Run equally well on all x86-64 CPUs. +config MNATIVE + bool "Native optimizations autodetected by GCC" + help + + GCC 4.2 and above support -march=native, which automatically detects + the optimum settings to use based on your processor. -march=native + also detects and applies additional settings beyond -march specific + to your CPU, (eg. -msse4). Unless you have a specific reason not to + (e.g. distcc cross-compiling), you should probably be using + -march=native rather than anything listed below. 
+ + Enables -march=native + endchoice config X86_GENERIC @@ -318,7 +558,7 @@ config X86_INTERNODE_CACHE_SHIFT config X86_L1_CACHE_SHIFT int default "7" if MPENTIUM4 || MPSC - default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU + default "6" if MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MJAGUAR || MPENTIUMM || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU default "4" if MELAN || M486SX || M486 || MGEODEGX1 default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX @@ -336,35 +576,36 @@ config X86_ALIGNMENT_16 config X86_INTEL_USERCOPY def_bool y - depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2 + depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK8SSE3 || MK7 || MEFFICEON || MCORE2 || MK10 || MBARCELONA || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE config X86_USE_PPRO_CHECKSUM def_bool y - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MK10 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MATOM || MNATIVE config X86_USE_3DNOW def_bool y depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML -# -# P6_NOPs are a relatively minor optimization that require a family >= -# 6 processor, except that it is broken on certain VIA chips. -# Furthermore, AMD chips prefer a totally different sequence of NOPs -# (which work on all CPUs). In addition, it looks like Virtual PC -# does not understand them. -# -# As a result, disallow these if we're not compiling for X86_64 (these -# NOPs do work on all x86-64 capable chips); the list of processors in -# the right-hand clause are the cores that benefit from this optimization. -# config X86_P6_NOP - def_bool y - depends on X86_64 - depends on (MCORE2 || MPENTIUM4 || MPSC) + default n + bool "Support for P6_NOPs on Intel chips" + depends on (MCORE2 || MPENTIUM4 || MPSC || MATOM || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE) + help + P6_NOPs are a relatively minor optimization that require a family >= + 6 processor, except that it is broken on certain VIA chips. + Furthermore, AMD chips prefer a totally different sequence of NOPs + (which work on all CPUs). 
In addition, it looks like Virtual PC + does not understand them. + + As a result, disallow these if we're not compiling for X86_64 (these + NOPs do work on all x86-64 capable chips); the list of processors in + the right-hand clause are the cores that benefit from this optimization. + + Say Y if you have Intel CPU newer than Pentium Pro, N otherwise. config X86_TSC def_bool y - depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64 + depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE || MATOM) || X86_64 config X86_CMPXCHG64 def_bool y @@ -374,7 +615,7 @@ config X86_CMPXCHG64 # generates cmov. config X86_CMOV def_bool y - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX) + depends on (MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MJAGUAR || MK7 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MNATIVE || MATOM || MGEODE_LX) config X86_MINIMUM_CPU_FAMILY int diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 00e378de8bc0..1145cee823a7 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -119,13 +119,60 @@ else KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup) # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu) + cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native) cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8) + cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3,-mtune=k8) + cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10) + cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona) + cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1) + cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2) + cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1) + cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2) + cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-mno-tbm) + cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-march=bdver3) + cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-mno-tbm) + cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4) + cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-mno-tbm) + cflags-$(CONFIG_MZEN) += $(call cc-option,-march=znver1) + cflags-$(CONFIG_MZEN2) += $(call cc-option,-march=znver2) cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona) cflags-$(CONFIG_MCORE2) += \ - $(call cc-option,-march=core2,$(call cc-option,-mtune=generic)) - cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \ - $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic)) + 
$(call cc-option,-march=core2,$(call cc-option,-mtune=core2)) + cflags-$(CONFIG_MNEHALEM) += \ + $(call cc-option,-march=nehalem,$(call cc-option,-mtune=nehalem)) + cflags-$(CONFIG_MWESTMERE) += \ + $(call cc-option,-march=westmere,$(call cc-option,-mtune=westmere)) + cflags-$(CONFIG_MSILVERMONT) += \ + $(call cc-option,-march=silvermont,$(call cc-option,-mtune=silvermont)) + cflags-$(CONFIG_MGOLDMONT) += \ + $(call cc-option,-march=goldmont,$(call cc-option,-mtune=goldmont)) + cflags-$(CONFIG_MGOLDMONTPLUS) += \ + $(call cc-option,-march=goldmont-plus,$(call cc-option,-mtune=goldmont-plus)) + cflags-$(CONFIG_MSANDYBRIDGE) += \ + $(call cc-option,-march=sandybridge,$(call cc-option,-mtune=sandybridge)) + cflags-$(CONFIG_MIVYBRIDGE) += \ + $(call cc-option,-march=ivybridge,$(call cc-option,-mtune=ivybridge)) + cflags-$(CONFIG_MHASWELL) += \ + $(call cc-option,-march=haswell,$(call cc-option,-mtune=haswell)) + cflags-$(CONFIG_MBROADWELL) += \ + $(call cc-option,-march=broadwell,$(call cc-option,-mtune=broadwell)) + cflags-$(CONFIG_MSKYLAKE) += \ + $(call cc-option,-march=skylake,$(call cc-option,-mtune=skylake)) + cflags-$(CONFIG_MSKYLAKEX) += \ + $(call cc-option,-march=skylake-avx512,$(call cc-option,-mtune=skylake-avx512)) + cflags-$(CONFIG_MCANNONLAKE) += \ + $(call cc-option,-march=cannonlake,$(call cc-option,-mtune=cannonlake)) + cflags-$(CONFIG_MICELAKE) += \ + $(call cc-option,-march=icelake-client,$(call cc-option,-mtune=icelake-client)) + cflags-$(CONFIG_MCASCADELAKE) += \ + $(call cc-option,-march=cascadelake,$(call cc-option,-mtune=cascadelake)) + cflags-$(CONFIG_MCOOPERLAKE) += \ + $(call cc-option,-march=cooperlake,$(call cc-option,-mtune=cooperlake)) + cflags-$(CONFIG_MTIGERLAKE) += \ + $(call cc-option,-march=tigerlake,$(call cc-option,-mtune=tigerlake)) + cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell) \ + $(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic)) cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic) KBUILD_CFLAGS += $(cflags-y) diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu index cd3056759880..cb0a4c6bd987 100644 --- a/arch/x86/Makefile_32.cpu +++ b/arch/x86/Makefile_32.cpu @@ -24,7 +24,19 @@ cflags-$(CONFIG_MK6) += -march=k6 # Please note, that patches that add -march=athlon-xp and friends are pointless. # They make zero difference whatsosever to performance at this time. 
cflags-$(CONFIG_MK7) += -march=athlon +cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native) cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,-march=athlon) +cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3,-march=athlon) +cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10,-march=athlon) +cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona,-march=athlon) +cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1,-march=athlon) +cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2,-march=athlon) +cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1,-march=athlon) +cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2,-march=athlon) +cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-march=bdver3,-march=athlon) +cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4,-march=athlon) +cflags-$(CONFIG_MZEN) += $(call cc-option,-march=znver1,-march=athlon) +cflags-$(CONFIG_MZEN2) += $(call cc-option,-march=znver2,-march=athlon) cflags-$(CONFIG_MCRUSOE) += -march=i686 -falign-functions=0 -falign-jumps=0 -falign-loops=0 cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) -falign-functions=0 -falign-jumps=0 -falign-loops=0 cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586) @@ -33,8 +45,24 @@ cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) -falign-fu cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686) cflags-$(CONFIG_MVIAC7) += -march=i686 cflags-$(CONFIG_MCORE2) += -march=i686 $(call tune,core2) -cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \ - $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic)) +cflags-$(CONFIG_MNEHALEM) += -march=i686 $(call tune,nehalem) +cflags-$(CONFIG_MWESTMERE) += -march=i686 $(call tune,westmere) +cflags-$(CONFIG_MSILVERMONT) += -march=i686 $(call tune,silvermont) +cflags-$(CONFIG_MGOLDMONT) += -march=i686 $(call tune,goldmont) +cflags-$(CONFIG_MGOLDMONTPLUS) += -march=i686 $(call tune,goldmont-plus) +cflags-$(CONFIG_MSANDYBRIDGE) += -march=i686 $(call tune,sandybridge) +cflags-$(CONFIG_MIVYBRIDGE) += -march=i686 $(call tune,ivybridge) +cflags-$(CONFIG_MHASWELL) += -march=i686 $(call tune,haswell) +cflags-$(CONFIG_MBROADWELL) += -march=i686 $(call tune,broadwell) +cflags-$(CONFIG_MSKYLAKE) += -march=i686 $(call tune,skylake) +cflags-$(CONFIG_MSKYLAKEX) += -march=i686 $(call tune,skylake-avx512) +cflags-$(CONFIG_MCANNONLAKE) += -march=i686 $(call tune,cannonlake) +cflags-$(CONFIG_MICELAKE) += -march=i686 $(call tune,icelake-client) +cflags-$(CONFIG_MCASCADELAKE) += -march=i686 $(call tune,cascadelake) +cflags-$(CONFIG_MCOOPERLAKE) += -march=i686 $(call tune,cooperlake) +cflags-$(CONFIG_MTIGERLAKE) += -march=i686 $(call tune,tigerlake) +cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell,$(call cc-option,-march=core2,-march=i686)) \ + $(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic)) # AMD Elan support cflags-$(CONFIG_MELAN) += -march=i486 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 5a828fde7a42..c08714ae76ec 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -26,7 +26,7 @@ OBJECT_FILES_NON_STANDARD := y KCOV_INSTRUMENT := n targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \ - vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4 + vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4 vmlinux.bin.zst KBUILD_CFLAGS := -m$(BITS) -O2 KBUILD_CFLAGS 
+= -fno-strict-aliasing $(call cc-option, -fPIE, -fPIC) @@ -42,6 +42,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning, gnu) KBUILD_CFLAGS += -Wno-pointer-sign KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=) KBUILD_CFLAGS += -fno-asynchronous-unwind-tables +KBUILD_CFLAGS += -D__DISABLE_EXPORTS KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ GCOV_PROFILE := n @@ -145,6 +146,8 @@ $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE $(call if_changed,lzo) $(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) FORCE $(call if_changed,lz4) +$(obj)/vmlinux.bin.zst: $(vmlinux.bin.all-y) FORCE + $(call if_changed,zstd22) suffix-$(CONFIG_KERNEL_GZIP) := gz suffix-$(CONFIG_KERNEL_BZIP2) := bz2 @@ -152,6 +155,7 @@ suffix-$(CONFIG_KERNEL_LZMA) := lzma suffix-$(CONFIG_KERNEL_XZ) := xz suffix-$(CONFIG_KERNEL_LZO) := lzo suffix-$(CONFIG_KERNEL_LZ4) := lz4 +suffix-$(CONFIG_KERNEL_ZSTD) := zst quiet_cmd_mkpiggy = MKPIGGY $@ cmd_mkpiggy = $(obj)/mkpiggy $< > $@ diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c index d7408af55738..0048269180d5 100644 --- a/arch/x86/boot/compressed/kaslr.c +++ b/arch/x86/boot/compressed/kaslr.c @@ -19,13 +19,6 @@ */ #define BOOT_CTYPE_H -/* - * _ctype[] in lib/ctype.c is needed by isspace() of linux/ctype.h. - * While both lib/ctype.c and lib/cmdline.c will bring EXPORT_SYMBOL - * which is meaningless and will cause compiling error in some cases. - */ -#define __DISABLE_EXPORTS - #include "misc.h" #include "error.h" #include "../string.h" diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 9652d5c2afda..e478e40fbe5a 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -30,12 +30,9 @@ #define STATIC static /* - * Use normal definitions of mem*() from string.c. There are already - * included header files which expect a definition of memset() and by - * the time we define memset macro, it is too late. + * Provide definitions of memzero and memmove as some of the decompressors will + * try to define their own functions if these are not defined as macros. */ -#undef memcpy -#undef memset #define memzero(s, n) memset((s), 0, (n)) #define memmove memmove @@ -77,6 +74,10 @@ static int lines, cols; #ifdef CONFIG_KERNEL_LZ4 #include "../../../../lib/decompress_unlz4.c" #endif + +#ifdef CONFIG_KERNEL_ZSTD +#include "../../../../lib/decompress_unzstd.c" +#endif /* * NOTE: When adding a new decompressor, please update the analysis in * ../header.S. diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S index 735ad7f21ab0..6dbd7e9f74c9 100644 --- a/arch/x86/boot/header.S +++ b/arch/x86/boot/header.S @@ -539,8 +539,14 @@ pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr # the size-dependent part now grows so fast. # # extra_bytes = (uncompressed_size >> 8) + 65536 +# +# ZSTD compressed data grows by at most 3 bytes per 128K, and only has a 22 +# byte fixed overhead but has a maximum block size of 128K, so it needs a +# larger margin. 
+# +# extra_bytes = (uncompressed_size >> 8) + 131072 -#define ZO_z_extra_bytes ((ZO_z_output_len >> 8) + 65536) +#define ZO_z_extra_bytes ((ZO_z_output_len >> 8) + 131072) #if ZO_z_output_len > ZO_z_input_len # define ZO_z_extract_offset (ZO_z_output_len + ZO_z_extra_bytes - \ ZO_z_input_len) diff --git a/arch/x86/boot/string.h b/arch/x86/boot/string.h index 995f7b7ad512..a232da487cd2 100644 --- a/arch/x86/boot/string.h +++ b/arch/x86/boot/string.h @@ -11,10 +11,7 @@ void *memcpy(void *dst, const void *src, size_t len); void *memset(void *dst, int c, size_t len); int memcmp(const void *s1, const void *s2, size_t len); -/* - * Access builtin version by default. If one needs to use optimized version, - * do "undef memcpy" in .c file and link against right string.c - */ +/* Access builtin version by default. */ #define memcpy(d,s,l) __builtin_memcpy(d,s,l) #define memset(d,c,l) __builtin_memset(d,c,l) #define memcmp __builtin_memcmp diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 4208c1e3f601..98e4d8886f11 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -6,6 +6,7 @@ #include #include #include +#include /* @@ -341,6 +342,12 @@ For 32-bit we have the following conventions - kernel is built with #endif .endm +.macro SAVE_AND_SET_GSBASE scratch_reg:req save_reg:req + rdgsbase \save_reg + GET_PERCPU_BASE \scratch_reg + wrgsbase \scratch_reg +.endm + #else /* CONFIG_X86_64 */ # undef UNWIND_HINT_IRET_REGS # define UNWIND_HINT_IRET_REGS @@ -351,3 +358,36 @@ For 32-bit we have the following conventions - kernel is built with call stackleak_erase #endif .endm + +#ifdef CONFIG_SMP + +/* + * CPU/node NR is loaded from the limit (size) field of a special segment + * descriptor entry in GDT. + */ +.macro LOAD_CPU_AND_NODE_SEG_LIMIT reg:req + movq $__CPUNODE_SEG, \reg + lsl \reg, \reg +.endm + +/* + * Fetch the per-CPU GSBASE value for this processor and put it in @reg. + * We normally use %gs for accessing per-CPU data, but we are setting up + * %gs here and obviously can not use %gs itself to access per-CPU data. + */ +.macro GET_PERCPU_BASE reg:req + ALTERNATIVE \ + "LOAD_CPU_AND_NODE_SEG_LIMIT \reg", \ + "RDPID \reg", \ + X86_FEATURE_RDPID + andq $VDSO_CPUNODE_MASK, \reg + movq __per_cpu_offset(, \reg, 8), \reg +.endm + +#else + +.macro GET_PERCPU_BASE reg:req + movq pcpu_unit_offsets(%rip), \reg +.endm + +#endif /* CONFIG_SMP */ diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index d2a00c97e53f..fb729f4c4fbc 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -38,6 +38,7 @@ #include #include #include +#include #include #include "calling.h" @@ -426,10 +427,7 @@ SYM_CODE_START(\asmsym) testb $3, CS-ORIG_RAX(%rsp) jnz .Lfrom_usermode_switch_stack_\@ - /* - * paranoid_entry returns SWAPGS flag for paranoid_exit in EBX. - * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS - */ + /* paranoid_entry returns GS information for paranoid_exit in EBX. */ call paranoid_entry UNWIND_HINT_REGS @@ -458,10 +456,7 @@ SYM_CODE_START(\asmsym) UNWIND_HINT_IRET_REGS offset=8 ASM_CLAC - /* - * paranoid_entry returns SWAPGS flag for paranoid_exit in EBX. - * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS - */ + /* paranoid_entry returns GS information for paranoid_exit in EBX. */ call paranoid_entry UNWIND_HINT_REGS @@ -798,24 +793,21 @@ SYM_CODE_END(xen_failsafe_callback) #endif /* CONFIG_XEN_PV */ /* - * Save all registers in pt_regs, and switch gs if needed. - * Use slow, but surefire "are we in kernel?" check. 
- * Return: ebx=0: need swapgs on exit, ebx=1: otherwise + * Save all registers in pt_regs. Return GSBASE related information + * in EBX depending on the availability of the FSGSBASE instructions: + * + * FSGSBASE R/EBX + * N 0 -> SWAPGS on exit + * 1 -> no SWAPGS on exit + * + * Y GSBASE value at entry, must be restored in paranoid_exit */ SYM_CODE_START_LOCAL(paranoid_entry) UNWIND_HINT_FUNC cld PUSH_AND_CLEAR_REGS save_ret=1 ENCODE_FRAME_POINTER 8 - movl $1, %ebx - movl $MSR_GS_BASE, %ecx - rdmsr - testl %edx, %edx - js 1f /* negative -> in kernel */ - SWAPGS - xorl %ebx, %ebx -1: /* * Always stash CR3 in %r14. This value will be restored, * verbatim, at exit. Needed if paranoid_entry interrupted @@ -825,9 +817,51 @@ SYM_CODE_START_LOCAL(paranoid_entry) * This is also why CS (stashed in the "iret frame" by the * hardware at entry) can not be used: this may be a return * to kernel code, but with a user CR3 value. + * + * Switching CR3 does not depend on kernel GSBASE so it can + * be done before switching to the kernel GSBASE. This is + * required for FSGSBASE because the kernel GSBASE has to + * be retrieved from a kernel internal table. */ SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14 + /* + * Handling GSBASE depends on the availability of FSGSBASE. + * + * Without FSGSBASE the kernel enforces that negative GSBASE + * values indicate kernel GSBASE. With FSGSBASE no assumptions + * can be made about the GSBASE value when entering from user + * space. + */ + ALTERNATIVE "jmp .Lparanoid_entry_checkgs", "", X86_FEATURE_FSGSBASE + + /* + * Read the current GSBASE and store it in %rbx unconditionally, + * retrieve and set the current CPUs kernel GSBASE. The stored value + * has to be restored in paranoid_exit unconditionally. + * + * The MSR write ensures that no subsequent load is based on a + * mispredicted GSBASE. No extra FENCE required. + */ + SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx + ret + +.Lparanoid_entry_checkgs: + /* EBX = 1 -> kernel GSBASE active, no restore required */ + movl $1, %ebx + /* + * The kernel-enforced convention is a negative GSBASE indicates + * a kernel value. No SWAPGS needed on entry and exit. + */ + movl $MSR_GS_BASE, %ecx + rdmsr + testl %edx, %edx + jns .Lparanoid_entry_swapgs + ret + +.Lparanoid_entry_swapgs: + SWAPGS + /* * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an * unconditional CR3 write, even in the PTI case. So do an lfence @@ -835,6 +869,8 @@ SYM_CODE_START_LOCAL(paranoid_entry) */ FENCE_SWAPGS_KERNEL_ENTRY + /* EBX = 0 -> SWAPGS required on exit */ + xorl %ebx, %ebx ret SYM_CODE_END(paranoid_entry) @@ -845,23 +881,45 @@ SYM_CODE_END(paranoid_entry) * * We may be returning to very strange contexts (e.g. very early * in syscall entry), so checking for preemption here would - * be complicated. Fortunately, we there's no good reason - * to try to handle preemption here. + * be complicated. Fortunately, there's no good reason to try + * to handle preemption here. + * + * R/EBX contains the GSBASE related information depending on the + * availability of the FSGSBASE instructions: + * + * FSGSBASE R/EBX + * N 0 -> SWAPGS on exit + * 1 -> no SWAPGS on exit * - * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) + * Y User space GSBASE, must be restored unconditionally */ SYM_CODE_START_LOCAL(paranoid_exit) UNWIND_HINT_REGS - testl %ebx, %ebx /* swapgs needed? 
*/ - jnz .Lparanoid_exit_no_swapgs - /* Always restore stashed CR3 value (see paranoid_entry) */ - RESTORE_CR3 scratch_reg=%rbx save_reg=%r14 + /* + * The order of operations is important. RESTORE_CR3 requires + * kernel GSBASE. + * + * NB to anyone to try to optimize this code: this code does + * not execute at all for exceptions from user mode. Those + * exceptions go through error_exit instead. + */ + RESTORE_CR3 scratch_reg=%rax save_reg=%r14 + + /* Handle the three GSBASE cases */ + ALTERNATIVE "jmp .Lparanoid_exit_checkgs", "", X86_FEATURE_FSGSBASE + + /* With FSGSBASE enabled, unconditionally restore GSBASE */ + wrgsbase %rbx + jmp restore_regs_and_return_to_kernel + +.Lparanoid_exit_checkgs: + /* On non-FSGSBASE systems, conditionally do SWAPGS */ + testl %ebx, %ebx + jnz restore_regs_and_return_to_kernel + + /* We are returning to a context with user GSBASE */ SWAPGS_UNSAFE_STACK - jmp restore_regs_and_return_to_kernel -.Lparanoid_exit_no_swapgs: - /* Always restore stashed CR3 value (see paranoid_entry) */ - RESTORE_CR3 scratch_reg=%rbx save_reg=%r14 - jmp restore_regs_and_return_to_kernel + jmp restore_regs_and_return_to_kernel SYM_CODE_END(paranoid_exit) /* @@ -1266,10 +1324,27 @@ end_repeat_nmi: /* Always restore stashed CR3 value (see paranoid_entry) */ RESTORE_CR3 scratch_reg=%r15 save_reg=%r14 - testl %ebx, %ebx /* swapgs needed? */ + /* + * The above invocation of paranoid_entry stored the GSBASE + * related information in R/EBX depending on the availability + * of FSGSBASE. + * + * If FSGSBASE is enabled, restore the saved GSBASE value + * unconditionally, otherwise take the conditional SWAPGS path. + */ + ALTERNATIVE "jmp nmi_no_fsgsbase", "", X86_FEATURE_FSGSBASE + + wrgsbase %rbx + jmp nmi_restore + +nmi_no_fsgsbase: + /* EBX == 0 -> invoke SWAPGS */ + testl %ebx, %ebx jnz nmi_restore + nmi_swapgs: SWAPGS_UNSAFE_STACK + nmi_restore: POP_REGS diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h index 680c320363db..9191280d9ea3 100644 --- a/arch/x86/include/asm/boot.h +++ b/arch/x86/include/asm/boot.h @@ -24,9 +24,16 @@ # error "Invalid value for CONFIG_PHYSICAL_ALIGN" #endif -#ifdef CONFIG_KERNEL_BZIP2 +#if defined(CONFIG_KERNEL_BZIP2) # define BOOT_HEAP_SIZE 0x400000 -#else /* !CONFIG_KERNEL_BZIP2 */ +#elif defined(CONFIG_KERNEL_ZSTD) +/* + * Zstd needs to allocate the ZSTD_DCtx in order to decompress the kernel. + * The ZSTD_DCtx is ~160KB, so set the heap size to 192KB because it is a + * round number and to allow some slack. + */ +# define BOOT_HEAP_SIZE 0x30000 +#else # define BOOT_HEAP_SIZE 0x10000 #endif diff --git a/arch/x86/include/asm/fsgsbase.h b/arch/x86/include/asm/fsgsbase.h index bca4c743de77..d552646411a9 100644 --- a/arch/x86/include/asm/fsgsbase.h +++ b/arch/x86/include/asm/fsgsbase.h @@ -19,36 +19,65 @@ extern unsigned long x86_gsbase_read_task(struct task_struct *task); extern void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase); extern void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase); -/* Helper functions for reading/writing FS/GS base */ +/* Must be protected by X86_FEATURE_FSGSBASE check. 
*/ -static inline unsigned long x86_fsbase_read_cpu(void) +static __always_inline unsigned long rdfsbase(void) { unsigned long fsbase; - rdmsrl(MSR_FS_BASE, fsbase); + asm volatile("rdfsbase %0" : "=r" (fsbase) :: "memory"); return fsbase; } -static inline unsigned long x86_gsbase_read_cpu_inactive(void) +static __always_inline unsigned long rdgsbase(void) { unsigned long gsbase; - rdmsrl(MSR_KERNEL_GS_BASE, gsbase); + asm volatile("rdgsbase %0" : "=r" (gsbase) :: "memory"); return gsbase; } -static inline void x86_fsbase_write_cpu(unsigned long fsbase) +static __always_inline void wrfsbase(unsigned long fsbase) { - wrmsrl(MSR_FS_BASE, fsbase); + asm volatile("wrfsbase %0" :: "r" (fsbase) : "memory"); } -static inline void x86_gsbase_write_cpu_inactive(unsigned long gsbase) +static __always_inline void wrgsbase(unsigned long gsbase) { - wrmsrl(MSR_KERNEL_GS_BASE, gsbase); + asm volatile("wrgsbase %0" :: "r" (gsbase) : "memory"); } +#include + +/* Helper functions for reading/writing FS/GS base */ + +static inline unsigned long x86_fsbase_read_cpu(void) +{ + unsigned long fsbase; + + if (static_cpu_has(X86_FEATURE_FSGSBASE)) + fsbase = rdfsbase(); + else + rdmsrl(MSR_FS_BASE, fsbase); + + return fsbase; +} + +static inline void x86_fsbase_write_cpu(unsigned long fsbase) +{ + if (static_cpu_has(X86_FEATURE_FSGSBASE)) + wrfsbase(fsbase); + else + wrmsrl(MSR_FS_BASE, fsbase); +} + +extern unsigned long x86_gsbase_read_cpu_inactive(void); +extern void x86_gsbase_write_cpu_inactive(unsigned long gsbase); +extern unsigned long x86_fsgsbase_read_task(struct task_struct *task, + unsigned short selector); + #endif /* CONFIG_X86_64 */ #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/inst.h b/arch/x86/include/asm/inst.h index f5a796da07f8..d063841a17e3 100644 --- a/arch/x86/include/asm/inst.h +++ b/arch/x86/include/asm/inst.h @@ -306,6 +306,21 @@ .endif MODRM 0xc0 movq_r64_xmm_opd1 movq_r64_xmm_opd2 .endm + +.macro RDPID opd + REG_TYPE rdpid_opd_type \opd + .if rdpid_opd_type == REG_TYPE_R64 + R64_NUM rdpid_opd \opd + .else + R32_NUM rdpid_opd \opd + .endif + .byte 0xf3 + .if rdpid_opd > 7 + PFX_REX rdpid_opd 0 + .endif + .byte 0x0f, 0xc7 + MODRM 0xc0 rdpid_opd 0x7 +.endm #endif #endif diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 03b7c4ca425a..2a1f7e1d7151 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -457,10 +457,8 @@ static inline unsigned long cpu_kernelmode_gs_base(int cpu) DECLARE_PER_CPU(unsigned int, irq_count); extern asmlinkage void ignore_sysret(void); -#if IS_ENABLED(CONFIG_KVM) /* Save actual FS/GS selectors and bases to current->thread */ -void save_fsgs_for_kvm(void); -#endif +void current_save_fsgs(void); #else /* X86_64 */ #ifdef CONFIG_STACKPROTECTOR /* @@ -575,7 +573,7 @@ native_load_sp0(unsigned long sp0) this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0); } -static inline void native_swapgs(void) +static __always_inline void native_swapgs(void) { #ifdef CONFIG_X86_64 asm volatile("swapgs" ::: "memory"); diff --git a/arch/x86/include/asm/vermagic.h b/arch/x86/include/asm/vermagic.h index 75884d2cdec3..14c222e78213 100644 --- a/arch/x86/include/asm/vermagic.h +++ b/arch/x86/include/asm/vermagic.h @@ -17,6 +17,40 @@ #define MODULE_PROC_FAMILY "586MMX " #elif defined CONFIG_MCORE2 #define MODULE_PROC_FAMILY "CORE2 " +#elif defined CONFIG_MNATIVE +#define MODULE_PROC_FAMILY "NATIVE " +#elif defined CONFIG_MNEHALEM +#define MODULE_PROC_FAMILY "NEHALEM " +#elif defined CONFIG_MWESTMERE +#define 
MODULE_PROC_FAMILY "WESTMERE " +#elif defined CONFIG_MSILVERMONT +#define MODULE_PROC_FAMILY "SILVERMONT " +#elif defined CONFIG_MGOLDMONT +#define MODULE_PROC_FAMILY "GOLDMONT " +#elif defined CONFIG_MGOLDMONTPLUS +#define MODULE_PROC_FAMILY "GOLDMONTPLUS " +#elif defined CONFIG_MSANDYBRIDGE +#define MODULE_PROC_FAMILY "SANDYBRIDGE " +#elif defined CONFIG_MIVYBRIDGE +#define MODULE_PROC_FAMILY "IVYBRIDGE " +#elif defined CONFIG_MHASWELL +#define MODULE_PROC_FAMILY "HASWELL " +#elif defined CONFIG_MBROADWELL +#define MODULE_PROC_FAMILY "BROADWELL " +#elif defined CONFIG_MSKYLAKE +#define MODULE_PROC_FAMILY "SKYLAKE " +#elif defined CONFIG_MSKYLAKEX +#define MODULE_PROC_FAMILY "SKYLAKEX " +#elif defined CONFIG_MCANNONLAKE +#define MODULE_PROC_FAMILY "CANNONLAKE " +#elif defined CONFIG_MICELAKE +#define MODULE_PROC_FAMILY "ICELAKE " +#elif defined CONFIG_MCASCADELAKE +#define MODULE_PROC_FAMILY "CASCADELAKE " +#elif defined CONFIG_MCOOPERLAKE +#define MODULE_PROC_FAMILY "COOPERLAKE " +#elif defined CONFIG_MTIGERLAKE +#define MODULE_PROC_FAMILY "TIGERLAKE " #elif defined CONFIG_MATOM #define MODULE_PROC_FAMILY "ATOM " #elif defined CONFIG_M686 @@ -35,6 +69,28 @@ #define MODULE_PROC_FAMILY "K7 " #elif defined CONFIG_MK8 #define MODULE_PROC_FAMILY "K8 " +#elif defined CONFIG_MK8SSE3 +#define MODULE_PROC_FAMILY "K8SSE3 " +#elif defined CONFIG_MK10 +#define MODULE_PROC_FAMILY "K10 " +#elif defined CONFIG_MBARCELONA +#define MODULE_PROC_FAMILY "BARCELONA " +#elif defined CONFIG_MBOBCAT +#define MODULE_PROC_FAMILY "BOBCAT " +#elif defined CONFIG_MBULLDOZER +#define MODULE_PROC_FAMILY "BULLDOZER " +#elif defined CONFIG_MPILEDRIVER +#define MODULE_PROC_FAMILY "PILEDRIVER " +#elif defined CONFIG_MSTEAMROLLER +#define MODULE_PROC_FAMILY "STEAMROLLER " +#elif defined CONFIG_MJAGUAR +#define MODULE_PROC_FAMILY "JAGUAR " +#elif defined CONFIG_MEXCAVATOR +#define MODULE_PROC_FAMILY "EXCAVATOR " +#elif defined CONFIG_MZEN +#define MODULE_PROC_FAMILY "ZEN " +#elif defined CONFIG_MZEN2 +#define MODULE_PROC_FAMILY "ZEN2 " #elif defined CONFIG_MELAN #define MODULE_PROC_FAMILY "ELAN " #elif defined CONFIG_MCRUSOE diff --git a/arch/x86/include/uapi/asm/hwcap2.h b/arch/x86/include/uapi/asm/hwcap2.h index 8b2effe6efb8..5fdfcb47000f 100644 --- a/arch/x86/include/uapi/asm/hwcap2.h +++ b/arch/x86/include/uapi/asm/hwcap2.h @@ -5,4 +5,7 @@ /* MONITOR/MWAIT enabled in Ring 3 */ #define HWCAP2_RING3MWAIT (1 << 0) +/* Kernel allows FSGSBASE instructions available in Ring 3 */ +#define HWCAP2_FSGSBASE BIT(1) + #endif diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 0b71970d2d3d..5ea5fbd76ad0 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -543,14 +543,12 @@ static void __init spectre_v1_select_mitigation(void) * If FSGSBASE is enabled, the user can put a kernel address in * GS, in which case SMAP provides no protection. * - * [ NOTE: Don't check for X86_FEATURE_FSGSBASE until the - * FSGSBASE enablement patches have been merged. ] - * * If FSGSBASE is disabled, the user can only put a user space * address in GS. That makes an attack harder, but still * possible if there's no SMAP protection. 
*/ - if (!smap_works_speculatively()) { + if (boot_cpu_has(X86_FEATURE_FSGSBASE) || + !smap_works_speculatively()) { /* * Mitigation can be provided from SWAPGS itself or * PTI as the CR3 write in the Meltdown mitigation diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 95c090a45b4b..965474d78cef 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -441,6 +441,22 @@ static void __init setup_cr_pinning(void) static_key_enable(&cr_pinning.key); } +static __init int x86_nofsgsbase_setup(char *arg) +{ + /* Require an exact match without trailing characters. */ + if (strlen(arg)) + return 0; + + /* Do not emit a message if the feature is not present. */ + if (!boot_cpu_has(X86_FEATURE_FSGSBASE)) + return 1; + + setup_clear_cpu_cap(X86_FEATURE_FSGSBASE); + pr_info("FSGSBASE disabled via kernel command line\n"); + return 1; +} +__setup("nofsgsbase", x86_nofsgsbase_setup); + /* * Protection Keys are not available in 32-bit mode. */ @@ -1495,6 +1511,12 @@ static void identify_cpu(struct cpuinfo_x86 *c) setup_smap(c); setup_umip(c); + /* Enable FSGSBASE instructions if available. */ + if (cpu_has(c, X86_FEATURE_FSGSBASE)) { + cr4_set_bits(X86_CR4_FSGSBASE); + elf_hwcap2 |= HWCAP2_FSGSBASE; + } + /* * The vendor-specific functions might have changed features. * Now we do "generic changes." diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index fe67dbd76e51..66f55845ce19 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -140,10 +140,12 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp, memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); #ifdef CONFIG_X86_64 - savesegment(gs, p->thread.gsindex); - p->thread.gsbase = p->thread.gsindex ? 0 : current->thread.gsbase; - savesegment(fs, p->thread.fsindex); - p->thread.fsbase = p->thread.fsindex ? 0 : current->thread.fsbase; + current_save_fsgs(); + p->thread.fsindex = current->thread.fsindex; + p->thread.fsbase = current->thread.fsbase; + p->thread.gsindex = current->thread.gsindex; + p->thread.gsbase = current->thread.gsbase; + savesegment(es, p->thread.es); savesegment(ds, p->thread.ds); #else diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 9a97415b2139..e14476f0c533 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -149,6 +149,56 @@ enum which_selector { GS }; +/* + * Out of line to be protected from kprobes and tracing. If this would be + * traced or probed than any access to a per CPU variable happens with + * the wrong GS. + * + * It is not used on Xen paravirt. When paravirt support is needed, it + * needs to be renamed with native_ prefix. + */ +static noinstr unsigned long __rdgsbase_inactive(void) +{ + unsigned long gsbase; + + lockdep_assert_irqs_disabled(); + + if (!static_cpu_has(X86_FEATURE_XENPV)) { + native_swapgs(); + gsbase = rdgsbase(); + native_swapgs(); + } else { + instrumentation_begin(); + rdmsrl(MSR_KERNEL_GS_BASE, gsbase); + instrumentation_end(); + } + + return gsbase; +} + +/* + * Out of line to be protected from kprobes and tracing. If this would be + * traced or probed than any access to a per CPU variable happens with + * the wrong GS. + * + * It is not used on Xen paravirt. When paravirt support is needed, it + * needs to be renamed with native_ prefix. 
+ */ +static noinstr void __wrgsbase_inactive(unsigned long gsbase) +{ + lockdep_assert_irqs_disabled(); + + if (!static_cpu_has(X86_FEATURE_XENPV)) { + native_swapgs(); + wrgsbase(gsbase); + native_swapgs(); + } else { + instrumentation_begin(); + wrmsrl(MSR_KERNEL_GS_BASE, gsbase); + instrumentation_end(); + } +} + /* * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are * not available. The goal is to be reasonably fast on non-FSGSBASE systems. @@ -198,22 +248,35 @@ static __always_inline void save_fsgs(struct task_struct *task) { savesegment(fs, task->thread.fsindex); savesegment(gs, task->thread.gsindex); - save_base_legacy(task, task->thread.fsindex, FS); - save_base_legacy(task, task->thread.gsindex, GS); + if (static_cpu_has(X86_FEATURE_FSGSBASE)) { + /* + * If FSGSBASE is enabled, we can't make any useful guesses + * about the base, and user code expects us to save the current + * value. Fortunately, reading the base directly is efficient. + */ + task->thread.fsbase = rdfsbase(); + task->thread.gsbase = __rdgsbase_inactive(); + } else { + save_base_legacy(task, task->thread.fsindex, FS); + save_base_legacy(task, task->thread.gsindex, GS); + } } -#if IS_ENABLED(CONFIG_KVM) /* * While a process is running,current->thread.fsbase and current->thread.gsbase - * may not match the corresponding CPU registers (see save_base_legacy()). KVM - * wants an efficient way to save and restore FSBASE and GSBASE. - * When FSGSBASE extensions are enabled, this will have to use RD{FS,GS}BASE. + * may not match the corresponding CPU registers (see save_base_legacy()). */ -void save_fsgs_for_kvm(void) +void current_save_fsgs(void) { + unsigned long flags; + + /* Interrupts need to be off for FSGSBASE */ + local_irq_save(flags); save_fsgs(current); + local_irq_restore(flags); } -EXPORT_SYMBOL_GPL(save_fsgs_for_kvm); +#if IS_ENABLED(CONFIG_KVM) +EXPORT_SYMBOL_GPL(current_save_fsgs); #endif static __always_inline void loadseg(enum which_selector which, @@ -278,14 +341,26 @@ static __always_inline void load_seg_legacy(unsigned short prev_index, static __always_inline void x86_fsgsbase_load(struct thread_struct *prev, struct thread_struct *next) { - load_seg_legacy(prev->fsindex, prev->fsbase, - next->fsindex, next->fsbase, FS); - load_seg_legacy(prev->gsindex, prev->gsbase, - next->gsindex, next->gsbase, GS); + if (static_cpu_has(X86_FEATURE_FSGSBASE)) { + /* Update the FS and GS selectors if they could have changed. */ + if (unlikely(prev->fsindex || next->fsindex)) + loadseg(FS, next->fsindex); + if (unlikely(prev->gsindex || next->gsindex)) + loadseg(GS, next->gsindex); + + /* Update the bases. 
*/ + wrfsbase(next->fsbase); + __wrgsbase_inactive(next->gsbase); + } else { + load_seg_legacy(prev->fsindex, prev->fsbase, + next->fsindex, next->fsbase, FS); + load_seg_legacy(prev->gsindex, prev->gsbase, + next->gsindex, next->gsbase, GS); + } } -static unsigned long x86_fsgsbase_read_task(struct task_struct *task, - unsigned short selector) +unsigned long x86_fsgsbase_read_task(struct task_struct *task, + unsigned short selector) { unsigned short idx = selector >> 3; unsigned long base; @@ -327,13 +402,44 @@ static unsigned long x86_fsgsbase_read_task(struct task_struct *task, return base; } +unsigned long x86_gsbase_read_cpu_inactive(void) +{ + unsigned long gsbase; + + if (static_cpu_has(X86_FEATURE_FSGSBASE)) { + unsigned long flags; + + local_irq_save(flags); + gsbase = __rdgsbase_inactive(); + local_irq_restore(flags); + } else { + rdmsrl(MSR_KERNEL_GS_BASE, gsbase); + } + + return gsbase; +} + +void x86_gsbase_write_cpu_inactive(unsigned long gsbase) +{ + if (static_cpu_has(X86_FEATURE_FSGSBASE)) { + unsigned long flags; + + local_irq_save(flags); + __wrgsbase_inactive(gsbase); + local_irq_restore(flags); + } else { + wrmsrl(MSR_KERNEL_GS_BASE, gsbase); + } +} + unsigned long x86_fsbase_read_task(struct task_struct *task) { unsigned long fsbase; if (task == current) fsbase = x86_fsbase_read_cpu(); - else if (task->thread.fsindex == 0) + else if (static_cpu_has(X86_FEATURE_FSGSBASE) || + (task->thread.fsindex == 0)) fsbase = task->thread.fsbase; else fsbase = x86_fsgsbase_read_task(task, task->thread.fsindex); @@ -347,7 +453,8 @@ unsigned long x86_gsbase_read_task(struct task_struct *task) if (task == current) gsbase = x86_gsbase_read_cpu_inactive(); - else if (task->thread.gsindex == 0) + else if (static_cpu_has(X86_FEATURE_FSGSBASE) || + (task->thread.gsindex == 0)) gsbase = task->thread.gsbase; else gsbase = x86_fsgsbase_read_task(task, task->thread.gsindex); diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 44130588987f..3f006489087f 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -281,17 +281,9 @@ static int set_segment_reg(struct task_struct *task, return -EIO; /* - * This function has some ABI oddities. - * - * A 32-bit ptracer probably expects that writing FS or GS will change - * FSBASE or GSBASE respectively. In the absence of FSGSBASE support, - * this code indeed has that effect. When FSGSBASE is added, this - * will require a special case. - * - * For existing 64-bit ptracers, writing FS or GS *also* currently - * changes the base if the selector is nonzero the next time the task - * is run. This behavior may not be needed, and trying to preserve it - * when FSGSBASE is added would be complicated at best. + * Writes to FS and GS will change the stored selector. Whether + * this changes the segment base as well depends on whether + * FSGSBASE is enabled. */ switch (offset) { @@ -379,25 +371,12 @@ static int putreg(struct task_struct *child, case offsetof(struct user_regs_struct,fs_base): if (value >= TASK_SIZE_MAX) return -EIO; - /* - * When changing the FS base, use do_arch_prctl_64() - * to set the index to zero and to set the base - * as requested. - * - * NB: This behavior is nonsensical and likely needs to - * change when FSGSBASE support is added. - */ - if (child->thread.fsbase != value) - return do_arch_prctl_64(child, ARCH_SET_FS, value); + x86_fsbase_write_task(child, value); return 0; case offsetof(struct user_regs_struct,gs_base): - /* - * Exactly the same here as the %fs handling above. 
- */ if (value >= TASK_SIZE_MAX) return -EIO; - if (child->thread.gsbase != value) - return do_arch_prctl_64(child, ARCH_SET_GS, value); + x86_gsbase_write_task(child, value); return 0; #endif } @@ -880,14 +859,39 @@ long arch_ptrace(struct task_struct *child, long request, static int putreg32(struct task_struct *child, unsigned regno, u32 value) { struct pt_regs *regs = task_pt_regs(child); + int ret; switch (regno) { SEG32(cs); SEG32(ds); SEG32(es); - SEG32(fs); - SEG32(gs); + + /* + * A 32-bit ptracer on a 64-bit kernel expects that writing + * FS or GS will also update the base. This is needed for + * operations like PTRACE_SETREGS to fully restore a saved + * CPU state. + */ + + case offsetof(struct user32, regs.fs): + ret = set_segment_reg(child, + offsetof(struct user_regs_struct, fs), + value); + if (ret == 0) + child->thread.fsbase = + x86_fsgsbase_read_task(child, value); + return ret; + + case offsetof(struct user32, regs.gs): + ret = set_segment_reg(child, + offsetof(struct user_regs_struct, gs), + value); + if (ret == 0) + child->thread.gsbase = + x86_fsgsbase_read_task(child, value); + return ret; + SEG32(ss); R32(ebx, bx); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 13745f2a5ecd..99bb9c46b17b 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1169,7 +1169,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) gs_base = cpu_kernelmode_gs_base(cpu); if (likely(is_64bit_mm(current->mm))) { - save_fsgs_for_kvm(); + current_save_fsgs(); fs_sel = current->thread.fsindex; gs_sel = current->thread.gsindex; fs_base = current->thread.fsbase; diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 50c8f034c01c..514daa564283 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -99,19 +99,19 @@ * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O * Scheduler", Proceedings of the First Workshop on Mobile System * Technologies (MST-2015), May 2015. - * http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf + * https://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf * * [2] Jon C.R. Bennett and H. Zhang, "Hierarchical Packet Fair Queueing * Algorithms", IEEE/ACM Transactions on Networking, 5(5):675-689, * Oct 1997. * - * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz + * https://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz * * [3] I. Stoica and H. Abdel-Wahab, "Earliest Eligible Virtual Deadline * First: A Flexible and Accurate Mechanism for Proportional Share * Resource Allocation", technical report. * - * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf + * https://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf */ #include #include @@ -4714,7 +4714,7 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) * some unlucky request wait for as long as the device * wishes. * - * Of course, serving one request at at time may cause loss of + * Of course, serving one request at a time may cause loss of * throughput. 
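
[Editor's note] With the ptrace changes above, writing fs_base/gs_base via PTRACE_POKEUSER goes through x86_fsbase_write_task()/x86_gsbase_write_task() instead of do_arch_prctl_64(), and 32-bit selector writes now also refresh the saved base. A minimal, hypothetical tracer sketch that pokes a new FS base into a stopped child (the address used is arbitrary and only has to stay below TASK_SIZE_MAX):

    #include <signal.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/user.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t pid = fork();

        if (pid == 0) {                 /* child: stop and wait for the tracer */
            ptrace(PTRACE_TRACEME, 0, NULL, NULL);
            raise(SIGSTOP);
            _exit(0);
        }

        waitpid(pid, NULL, 0);          /* child is now stopped */

        /* Values >= TASK_SIZE_MAX are rejected with -EIO, as in the hunk above. */
        if (ptrace(PTRACE_POKEUSER, pid,
                   offsetof(struct user_regs_struct, fs_base),
                   (void *)0x7f1234560000UL) != 0)
            perror("PTRACE_POKEUSER fs_base");

        ptrace(PTRACE_DETACH, pid, NULL, NULL);
        waitpid(pid, NULL, 0);
        return 0;
    }
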
*/ if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h index eca95b7f64d2..d01f47c83eb1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h @@ -39,6 +39,7 @@ static inline bool mt76x2_channel_silent(struct mt76x02_dev *dev) extern const struct ieee80211_ops mt76x2_ops; int mt76x2_register_device(struct mt76x02_dev *dev); +int mt76x2_resume_device(struct mt76x02_dev *dev); void mt76x2_phy_power_on(struct mt76x02_dev *dev); void mt76x2_stop_hardware(struct mt76x02_dev *dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c index 53ca0cedf026..e7910023a62a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c @@ -103,6 +103,58 @@ mt76pci_remove(struct pci_dev *pdev) mt76_free_device(mdev); } +static int __maybe_unused +mt76x2e_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct mt76_dev *mdev = pci_get_drvdata(pdev); + int i, err; + + napi_disable(&mdev->tx_napi); + tasklet_kill(&mdev->pre_tbtt_tasklet); + tasklet_kill(&mdev->tx_tasklet); + + mt76_for_each_q_rx(mdev, i) + napi_disable(&mdev->napi[i]); + + pci_enable_wake(pdev, pci_choose_state(pdev, state), true); + pci_save_state(pdev); + err = pci_set_power_state(pdev, pci_choose_state(pdev, state)); + if (err) + goto restore; + + return 0; + +restore: + mt76_for_each_q_rx(mdev, i) + napi_enable(&mdev->napi[i]); + napi_enable(&mdev->tx_napi); + + return err; +} + +static int __maybe_unused +mt76x2e_resume(struct pci_dev *pdev) +{ + struct mt76_dev *mdev = pci_get_drvdata(pdev); + struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); + int i, err; + + err = pci_set_power_state(pdev, PCI_D0); + if (err) + return err; + + pci_restore_state(pdev); + + mt76_for_each_q_rx(mdev, i) { + napi_enable(&mdev->napi[i]); + napi_schedule(&mdev->napi[i]); + } + napi_enable(&mdev->tx_napi); + napi_schedule(&mdev->tx_napi); + + return mt76x2_resume_device(dev); +} + MODULE_DEVICE_TABLE(pci, mt76pci_device_table); MODULE_FIRMWARE(MT7662_FIRMWARE); MODULE_FIRMWARE(MT7662_ROM_PATCH); @@ -113,6 +165,10 @@ static struct pci_driver mt76pci_driver = { .id_table = mt76pci_device_table, .probe = mt76pci_probe, .remove = mt76pci_remove, +#ifdef CONFIG_PM + .suspend = mt76x2e_suspend, + .resume = mt76x2e_resume, +#endif /* CONFIG_PM */ }; module_pci_driver(mt76pci_driver); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c index f27774f57438..101a0fe00ef3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c @@ -217,6 +217,23 @@ mt76x2_power_on(struct mt76x02_dev *dev) mt76x2_power_on_rf(dev, 1); } +int mt76x2_resume_device(struct mt76x02_dev *dev) +{ + int err; + + mt76x02_dma_disable(dev); + mt76x2_reset_wlan(dev, true); + mt76x2_power_on(dev); + + err = mt76x2_mac_reset(dev, true); + if (err) + return err; + + mt76x02_mac_start(dev); + + return mt76x2_mcu_init(dev); +} + static int mt76x2_init_hardware(struct mt76x02_dev *dev) { int ret; diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c index c25a4286d766..5e722e1b6466 100644 --- a/drivers/platform/x86/dell-wmi.c +++ b/drivers/platform/x86/dell-wmi.c @@ -173,6 +173,11 @@ static const struct key_entry 
dell_wmi_keymap_type_0000[] = { /* Dell Support Center key */ { KE_IGNORE, 0xe06e, { KEY_RESERVED } }, + /* Dell Vostro 3360 multimedia keys with mangled DSDT */ + { KE_KEY, 0xe0f1, { KEY_PROG1 } }, + { KE_KEY, 0xe0f2, { KEY_PROG2 } }, + { KE_KEY, 0xe0f3, { KEY_PROG3 } }, + { KE_IGNORE, 0xe0f7, { KEY_MUTE } }, { KE_IGNORE, 0xe0f8, { KEY_VOLUMEDOWN } }, { KE_IGNORE, 0xe0f9, { KEY_VOLUMEUP } }, diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c index 18ebd7a6af98..0b43efddea22 100644 --- a/drivers/virt/vboxguest/vboxguest_core.c +++ b/drivers/virt/vboxguest/vboxguest_core.c @@ -559,7 +559,7 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev, * Changes the event filter mask for the given session. * * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to - * do session cleanup. Takes the session spinlock. + * do session cleanup. Takes the session mutex. * * Return: 0 or negative errno value. * @gdev: The Guest extension device. @@ -662,7 +662,156 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev) } /** - * Sets the guest capabilities for a session. Takes the session spinlock. + * Set guest capabilities on the host. + * Must be called with gdev->session_mutex hold. + * Return: 0 or negative errno value. + * @gdev: The Guest extension device. + * @session: The session. + * @session_termination: Set if we're called by the session cleanup code. + */ +static int vbg_set_host_capabilities(struct vbg_dev *gdev, + struct vbg_session *session, + bool session_termination) +{ + struct vmmdev_mask *req; + u32 caps; + int rc; + + WARN_ON(!mutex_is_locked(&gdev->session_mutex)); + + caps = gdev->acquired_guest_caps | gdev->set_guest_caps_tracker.mask; + + if (gdev->guest_caps_host == caps) + return 0; + + /* On termination the requestor is the kernel, as we're cleaning up. */ + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES, + session_termination ? VBG_KERNEL_REQUEST : + session->requestor); + if (!req) { + gdev->guest_caps_host = U32_MAX; + return -ENOMEM; + } + + req->or_mask = caps; + req->not_mask = ~caps; + rc = vbg_req_perform(gdev, req); + vbg_req_free(req, sizeof(*req)); + + gdev->guest_caps_host = (rc >= 0) ? caps : U32_MAX; + + return vbg_status_code_to_errno(rc); +} + +/** + * Acquire (get exclusive access) guest capabilities for a session. + * Takes the session mutex. + * Return: 0 or negative errno value. + * @gdev: The Guest extension device. + * @session: The session. + * @flags: Flags (VBGL_IOC_AGC_FLAGS_XXX). + * @or_mask: The capabilities to add. + * @not_mask: The capabilities to remove. + * @session_termination: Set if we're called by the session cleanup code. + * This tweaks the error handling so we perform + * proper session cleanup even if the host + * misbehaves. + */ +static int vbg_acquire_session_capabilities(struct vbg_dev *gdev, + struct vbg_session *session, + u32 or_mask, u32 not_mask, + u32 flags, bool session_termination) +{ + unsigned long irqflags; + bool wakeup = false; + int ret = 0; + + mutex_lock(&gdev->session_mutex); + + if (gdev->set_guest_caps_tracker.mask & or_mask) { + vbg_err("%s error: cannot acquire caps which are currently set\n", + __func__); + ret = -EINVAL; + goto out; + } + + /* + * Mark any caps in the or_mask as now being in acquire-mode. Note + * once caps are in acquire_mode they always stay in this mode. + * This impacts event handling, so we take the event-lock. 
+ */ + spin_lock_irqsave(&gdev->event_spinlock, irqflags); + gdev->acquire_mode_guest_caps |= or_mask; + spin_unlock_irqrestore(&gdev->event_spinlock, irqflags); + + /* If we only have to switch the caps to acquire mode, we're done. */ + if (flags & VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE) + goto out; + + not_mask &= ~or_mask; /* or_mask takes priority over not_mask */ + not_mask &= session->acquired_guest_caps; + or_mask &= ~session->acquired_guest_caps; + + if (or_mask == 0 && not_mask == 0) + goto out; + + if (gdev->acquired_guest_caps & or_mask) { + ret = -EBUSY; + goto out; + } + + gdev->acquired_guest_caps |= or_mask; + gdev->acquired_guest_caps &= ~not_mask; + /* session->acquired_guest_caps impacts event handling, take the lock */ + spin_lock_irqsave(&gdev->event_spinlock, irqflags); + session->acquired_guest_caps |= or_mask; + session->acquired_guest_caps &= ~not_mask; + spin_unlock_irqrestore(&gdev->event_spinlock, irqflags); + + ret = vbg_set_host_capabilities(gdev, session, session_termination); + /* Roll back on failure, unless it's session termination time. */ + if (ret < 0 && !session_termination) { + gdev->acquired_guest_caps &= ~or_mask; + gdev->acquired_guest_caps |= not_mask; + spin_lock_irqsave(&gdev->event_spinlock, irqflags); + session->acquired_guest_caps &= ~or_mask; + session->acquired_guest_caps |= not_mask; + spin_unlock_irqrestore(&gdev->event_spinlock, irqflags); + } + + /* + * If we added a capability, check if that means some other thread in + * our session should be unblocked because there are events pending + * (the result of vbg_get_allowed_event_mask_for_session() may change). + * + * HACK ALERT! When the seamless support capability is added we generate + * a seamless change event so that the ring-3 client can sync with + * the seamless state. + */ + if (ret == 0 && or_mask != 0) { + spin_lock_irqsave(&gdev->event_spinlock, irqflags); + + if (or_mask & VMMDEV_GUEST_SUPPORTS_SEAMLESS) + gdev->pending_events |= + VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST; + + if (gdev->pending_events) + wakeup = true; + + spin_unlock_irqrestore(&gdev->event_spinlock, irqflags); + + if (wakeup) + wake_up(&gdev->event_wq); + } + +out: + mutex_unlock(&gdev->session_mutex); + + return ret; +} + +/** + * Sets the guest capabilities for a session. Takes the session mutex. * Return: 0 or negative errno value. * @gdev: The Guest extension device. * @session: The session. @@ -678,62 +827,40 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev, u32 or_mask, u32 not_mask, bool session_termination) { - struct vmmdev_mask *req; u32 changed, previous; - int rc, ret = 0; - - /* - * Allocate a request buffer before taking the spinlock, when - * the session is being terminated the requestor is the kernel, - * as we're cleaning up. - */ - req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES, - session_termination ? VBG_KERNEL_REQUEST : - session->requestor); - if (!req) { - if (!session_termination) - return -ENOMEM; - /* Ignore allocation failure, we must do session cleanup. */ - } + int ret = 0; mutex_lock(&gdev->session_mutex); + if (gdev->acquire_mode_guest_caps & or_mask) { + vbg_err("%s error: cannot set caps which are in acquire_mode\n", + __func__); + ret = -EBUSY; + goto out; + } + /* Apply the changes to the session mask. 
*/ - previous = session->guest_caps; - session->guest_caps |= or_mask; - session->guest_caps &= ~not_mask; + previous = session->set_guest_caps; + session->set_guest_caps |= or_mask; + session->set_guest_caps &= ~not_mask; /* If anything actually changed, update the global usage counters. */ - changed = previous ^ session->guest_caps; + changed = previous ^ session->set_guest_caps; if (!changed) goto out; - vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, previous); - or_mask = gdev->guest_caps_tracker.mask; - - if (gdev->guest_caps_host == or_mask || !req) - goto out; + vbg_track_bit_usage(&gdev->set_guest_caps_tracker, changed, previous); - gdev->guest_caps_host = or_mask; - req->or_mask = or_mask; - req->not_mask = ~or_mask; - rc = vbg_req_perform(gdev, req); - if (rc < 0) { - ret = vbg_status_code_to_errno(rc); - - /* Failed, roll back (unless it's session termination time). */ - gdev->guest_caps_host = U32_MAX; - if (session_termination) - goto out; - - vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, - session->guest_caps); - session->guest_caps = previous; + ret = vbg_set_host_capabilities(gdev, session, session_termination); + /* Roll back on failure, unless it's session termination time. */ + if (ret < 0 && !session_termination) { + vbg_track_bit_usage(&gdev->set_guest_caps_tracker, changed, + session->set_guest_caps); + session->set_guest_caps = previous; } out: mutex_unlock(&gdev->session_mutex); - vbg_req_free(req, sizeof(*req)); return ret; } @@ -949,6 +1076,7 @@ void vbg_core_close_session(struct vbg_session *session) struct vbg_dev *gdev = session->gdev; int i, rc; + vbg_acquire_session_capabilities(gdev, session, 0, U32_MAX, 0, true); vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true); vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true); @@ -1006,6 +1134,25 @@ static int vbg_ioctl_driver_version_info( return 0; } +/* Must be called with the event_lock held */ +static u32 vbg_get_allowed_event_mask_for_session(struct vbg_dev *gdev, + struct vbg_session *session) +{ + u32 acquire_mode_caps = gdev->acquire_mode_guest_caps; + u32 session_acquired_caps = session->acquired_guest_caps; + u32 allowed_events = VMMDEV_EVENT_VALID_EVENT_MASK; + + if ((acquire_mode_caps & VMMDEV_GUEST_SUPPORTS_GRAPHICS) && + !(session_acquired_caps & VMMDEV_GUEST_SUPPORTS_GRAPHICS)) + allowed_events &= ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST; + + if ((acquire_mode_caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS) && + !(session_acquired_caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)) + allowed_events &= ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST; + + return allowed_events; +} + static bool vbg_wait_event_cond(struct vbg_dev *gdev, struct vbg_session *session, u32 event_mask) @@ -1017,6 +1164,7 @@ static bool vbg_wait_event_cond(struct vbg_dev *gdev, spin_lock_irqsave(&gdev->event_spinlock, flags); events = gdev->pending_events & event_mask; + events &= vbg_get_allowed_event_mask_for_session(gdev, session); wakeup = events || session->cancel_waiters; spin_unlock_irqrestore(&gdev->event_spinlock, flags); @@ -1031,6 +1179,7 @@ static u32 vbg_consume_events_locked(struct vbg_dev *gdev, { u32 events = gdev->pending_events & event_mask; + events &= vbg_get_allowed_event_mask_for_session(gdev, session); gdev->pending_events &= ~events; return events; } @@ -1150,7 +1299,9 @@ static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session, case VMMDEVREQ_VIDEO_ACCEL_ENABLE: case VMMDEVREQ_VIDEO_ACCEL_FLUSH: case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION: + case 
VMMDEVREQ_VIDEO_UPDATE_MONITOR_POSITIONS: case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX: + case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ_MULTI: case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ: case VMMDEVREQ_GET_VRDPCHANGE_REQ: case VMMDEVREQ_LOG_STRING: @@ -1432,6 +1583,29 @@ static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev, false); } +static int vbg_ioctl_acquire_guest_capabilities(struct vbg_dev *gdev, + struct vbg_session *session, + struct vbg_ioctl_acquire_guest_caps *caps) +{ + u32 flags, or_mask, not_mask; + + if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), 0)) + return -EINVAL; + + flags = caps->u.in.flags; + or_mask = caps->u.in.or_mask; + not_mask = caps->u.in.not_mask; + + if (flags & ~VBGL_IOC_AGC_FLAGS_VALID_MASK) + return -EINVAL; + + if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK) + return -EINVAL; + + return vbg_acquire_session_capabilities(gdev, session, or_mask, + not_mask, flags, false); +} + static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev, struct vbg_session *session, struct vbg_ioctl_set_guest_caps *caps) { @@ -1452,7 +1626,7 @@ static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev, if (ret) return ret; - caps->u.out.session_caps = session->guest_caps; + caps->u.out.session_caps = session->set_guest_caps; caps->u.out.global_caps = gdev->guest_caps_host; return 0; @@ -1541,6 +1715,8 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data) return vbg_ioctl_interrupt_all_wait_events(gdev, session, data); case VBG_IOCTL_CHANGE_FILTER_MASK: return vbg_ioctl_change_filter_mask(gdev, session, data); + case VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES: + return vbg_ioctl_acquire_guest_capabilities(gdev, session, data); case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES: return vbg_ioctl_change_guest_capabilities(gdev, session, data); case VBG_IOCTL_CHECK_BALLOON: @@ -1563,7 +1739,7 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data) return vbg_ioctl_log(data); } - vbg_debug("VGDrvCommonIoCtl: Unknown req %#08x\n", req); + vbg_err_ratelimited("Userspace made an unknown ioctl req %#08x\n", req); return -ENOTTY; } diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h index 77c3a9c8255d..ab4bf64e2cec 100644 --- a/drivers/virt/vboxguest/vboxguest_core.h +++ b/drivers/virt/vboxguest/vboxguest_core.h @@ -118,11 +118,21 @@ struct vbg_dev { u32 event_filter_host; /** - * Usage counters for guest capabilities. Indexed by capability bit + * Guest capabilities which have been switched to acquire_mode. + */ + u32 acquire_mode_guest_caps; + /** + * Guest capabilities acquired by vbg_acquire_session_capabilities(). + * Only one session can acquire a capability at a time. + */ + u32 acquired_guest_caps; + /** + * Usage counters for guest capabilities requested through + * vbg_set_session_capabilities(). Indexed by capability bit * number, one count per session using a capability. * Protected by session_mutex. */ - struct vbg_bit_usage_tracker guest_caps_tracker; + struct vbg_bit_usage_tracker set_guest_caps_tracker; /** * The guest capabilities last reported to the host (or UINT32_MAX). * Protected by session_mutex. @@ -164,11 +174,16 @@ struct vbg_session { */ u32 event_filter; /** - * Guest capabilities for this session. + * Guest capabilities acquired by vbg_acquire_session_capabilities(). + * Only one session can acquire a capability at a time. + */ + u32 acquired_guest_caps; + /** + * Guest capabilities set through vbg_set_session_capabilities(). 
* A capability claimed by any guest session will be reported to the * host. Protected by vbg_gdev.session_mutex. */ - u32 guest_caps; + u32 set_guest_caps; /** VMMDEV_REQUESTOR_* flags */ u32 requestor; /** Set on CANCEL_ALL_WAITEVENTS, protected by vbg_devevent_spinlock. */ diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c index 7396187ee32a..ea05af41ec69 100644 --- a/drivers/virt/vboxguest/vboxguest_utils.c +++ b/drivers/virt/vboxguest/vboxguest_utils.c @@ -59,6 +59,7 @@ EXPORT_SYMBOL(name) VBG_LOG(vbg_info, pr_info); VBG_LOG(vbg_warn, pr_warn); VBG_LOG(vbg_err, pr_err); +VBG_LOG(vbg_err_ratelimited, pr_err_ratelimited); #if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG) VBG_LOG(vbg_debug, pr_debug); #endif diff --git a/fs/proc/base.c b/fs/proc/base.c index d86c0afc8a85..9b446b5850fe 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -96,6 +96,8 @@ #include #include #include +#include +#include #include #include "internal.h" #include "fd.h" @@ -2189,16 +2191,16 @@ struct map_files_info { }; /* - * Only allow CAP_SYS_ADMIN to follow the links, due to concerns about how the - * symlinks may be used to bypass permissions on ancestor directories in the - * path to the file in question. + * Only allow CAP_SYS_ADMIN and CAP_CHECKPOINT_RESTORE to follow the links, due + * to concerns about how the symlinks may be used to bypass permissions on + * ancestor directories in the path to the file in question. */ static const char * proc_map_files_get_link(struct dentry *dentry, struct inode *inode, struct delayed_call *done) { - if (!capable(CAP_SYS_ADMIN)) + if (!checkpoint_restore_ns_capable(&init_user_ns)) return ERR_PTR(-EPERM); return proc_pid_get_link(dentry, inode, done); @@ -3136,6 +3138,149 @@ static int proc_stack_depth(struct seq_file *m, struct pid_namespace *ns, } #endif /* CONFIG_STACKLEAK_METRICS */ +#ifdef CONFIG_KSM +static int ksm_open(struct inode *inode, struct file *file) +{ + struct task_struct *task; + struct mm_struct *mm; + int err; + + task = get_proc_task(inode); + if (!task) { + err = -ESRCH; + goto out; + } + if (task->flags & PF_KTHREAD) { + put_task_struct(task); + err = -EINVAL; + goto out; + } + + mm = mm_access(task, PTRACE_MODE_ATTACH_FSCREDS); + put_task_struct(task); + if (!mm) { + err = -EINVAL; + goto out; + } + if (IS_ERR(mm)) { + err = PTR_ERR(mm); + goto out; + } + + /* ensure this mm_struct can't be freed */ + mmgrab(mm); + /* but do not pin its memory */ + mmput(mm); + + err = 0; + file->private_data = mm; + +out: + return err; +} + +static ssize_t ksm_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + char kbuf[PROC_NUMBUF]; + char *pos; + int behaviour; + struct mm_struct *mm = file->private_data; + int err; + int last_err; + struct vm_area_struct *vma; + + if (!mm) { + err = -EINVAL; + goto out; + } + + /* Only allow a very narrow range of strings to be written */ + if ((*ppos != 0) || (count >= sizeof(kbuf))) { + err = -EINVAL; + goto out; + } + + /* What was written? */ + if (copy_from_user(kbuf, buf, count)) { + err = -EFAULT; + goto out; + } + kbuf[count] = '\0'; + pos = kbuf; + + /* What is being requested? 
*/ + if (strncmp(pos, "merge", 5) == 0) { + pos += 5; + behaviour = MADV_MERGEABLE; + } + else if (strncmp(pos, "unmerge", 7) == 0) { + pos += 7; + behaviour = MADV_UNMERGEABLE; + } + else { + err = -EINVAL; + goto out; + } + + /* Verify there is not trailing junk on the line */ + pos = skip_spaces(pos); + if (*pos != '\0') { + err = -EINVAL; + goto out; + } + + if (!mmget_not_zero(mm)) { + err = -EINVAL; + goto out; + } + + mmap_write_lock(mm); + if (!mmget_still_valid(mm)) { + err = -EINVAL; + goto skip_mm; + } + + err = 0; + + vma = mm->mmap; + while (vma) { + if (behaviour == MADV_MERGEABLE) + last_err = ksm_madvise_merge(vma->vm_mm, vma, &vma->vm_flags); + else + last_err = ksm_madvise_unmerge(vma, vma->vm_start, vma->vm_end, &vma->vm_flags); + if (last_err) + err = last_err; + vma = vma->vm_next; + } + +skip_mm: + mmap_write_unlock(mm); + + mmput(mm); + +out: + return err ? err : count; +} + +static int ksm_release(struct inode *inode, struct file *file) +{ + struct mm_struct *mm = file->private_data; + + if (mm) + mmdrop(mm); + + return 0; +} + +static const struct file_operations proc_ksm_operations = { + .open = ksm_open, + .write = ksm_write, + .llseek = noop_llseek, + .release = ksm_release, +}; +#endif /* CONFIG_KSM */ + /* * Thread groups */ @@ -3249,6 +3394,9 @@ static const struct pid_entry tgid_base_stuff[] = { #ifdef CONFIG_PROC_PID_ARCH_STATUS ONE("arch_status", S_IRUGO, proc_pid_arch_status), #endif +#ifdef CONFIG_KSM + REG("ksm", S_IRUGO|S_IWUSR, proc_ksm_operations), +#endif }; static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx) diff --git a/include/linux/capability.h b/include/linux/capability.h index b4345b38a6be..1e7fe311cabe 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -261,6 +261,12 @@ static inline bool bpf_capable(void) return capable(CAP_BPF) || capable(CAP_SYS_ADMIN); } +static inline bool checkpoint_restore_ns_capable(struct user_namespace *ns) +{ + return ns_capable(ns, CAP_CHECKPOINT_RESTORE) || + ns_capable(ns, CAP_SYS_ADMIN); +} + /* audit system wants to get cap info from files as well */ extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 6fa0eea3f530..25a521d299c1 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -85,11 +85,13 @@ static inline unsigned long compact_gap(unsigned int order) #ifdef CONFIG_COMPACTION extern int sysctl_compact_memory; +extern unsigned int sysctl_compaction_proactiveness; extern int sysctl_compaction_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos); extern int sysctl_extfrag_threshold; extern int sysctl_compact_unevictable_allowed; +extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order); extern int fragmentation_index(struct zone *zone, unsigned int order); extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, diff --git a/include/linux/decompress/unzstd.h b/include/linux/decompress/unzstd.h new file mode 100644 index 000000000000..56d539ae880f --- /dev/null +++ b/include/linux/decompress/unzstd.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_DECOMPRESS_UNZSTD_H +#define LINUX_DECOMPRESS_UNZSTD_H + +int unzstd(unsigned char *inbuf, long len, + long (*fill)(void*, unsigned long), + long (*flush)(void*, unsigned long), + unsigned char *output, + long *pos, + void (*error_fn)(char *x)); 
+#endif diff --git a/include/linux/ksm.h b/include/linux/ksm.h index e48b1e453ff5..a91a7cfc87a1 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -19,6 +19,10 @@ struct stable_node; struct mem_cgroup; #ifdef CONFIG_KSM +int ksm_madvise_merge(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long *vm_flags); +int ksm_madvise_unmerge(struct vm_area_struct *vma, unsigned long start, + unsigned long end, unsigned long *vm_flags); int ksm_madvise(struct vm_area_struct *vma, unsigned long start, unsigned long end, int advice, unsigned long *vm_flags); int __ksm_enter(struct mm_struct *mm); diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index 6ef1c7109fc4..41b1019f6029 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -99,6 +99,12 @@ struct ucounts { extern struct user_namespace init_user_ns; +#ifdef CONFIG_USER_NS +extern int unprivileged_userns_clone; +#else +#define unprivileged_userns_clone 0 +#endif + bool setup_userns_sysctls(struct user_namespace *ns); void retire_userns_sysctls(struct user_namespace *ns); struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid, enum ucount_type type); diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h index ff56c443180c..db8a7d118093 100644 --- a/include/linux/vbox_utils.h +++ b/include/linux/vbox_utils.h @@ -16,6 +16,7 @@ struct vbg_dev; __printf(1, 2) void vbg_info(const char *fmt, ...); __printf(1, 2) void vbg_warn(const char *fmt, ...); __printf(1, 2) void vbg_err(const char *fmt, ...); +__printf(1, 2) void vbg_err_ratelimited(const char *fmt, ...); /* Only use backdoor logging for non-dynamic debug builds */ #if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG) diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h index 48ff0757ae5e..395dd0df8d08 100644 --- a/include/uapi/linux/capability.h +++ b/include/uapi/linux/capability.h @@ -408,7 +408,14 @@ struct vfs_ns_cap_data { */ #define CAP_BPF 39 -#define CAP_LAST_CAP CAP_BPF + +/* Allow checkpoint/restore related operations */ +/* Allow PID selection during clone3() */ +/* Allow writing to ns_last_pid */ + +#define CAP_CHECKPOINT_RESTORE 40 + +#define CAP_LAST_CAP CAP_CHECKPOINT_RESTORE #define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP) diff --git a/include/uapi/linux/futex.h b/include/uapi/linux/futex.h index a89eb0accd5e..a3e760886b8e 100644 --- a/include/uapi/linux/futex.h +++ b/include/uapi/linux/futex.h @@ -21,6 +21,7 @@ #define FUTEX_WAKE_BITSET 10 #define FUTEX_WAIT_REQUEUE_PI 11 #define FUTEX_CMP_REQUEUE_PI 12 +#define FUTEX_WAIT_MULTIPLE 31 #define FUTEX_PRIVATE_FLAG 128 #define FUTEX_CLOCK_REALTIME 256 @@ -40,6 +41,8 @@ FUTEX_PRIVATE_FLAG) #define FUTEX_CMP_REQUEUE_PI_PRIVATE (FUTEX_CMP_REQUEUE_PI | \ FUTEX_PRIVATE_FLAG) +#define FUTEX_WAIT_MULTIPLE_PRIVATE (FUTEX_WAIT_MULTIPLE | \ + FUTEX_PRIVATE_FLAG) /* * Support for robust futexes: the kernel cleans up held futexes at @@ -150,4 +153,21 @@ struct robust_list_head { (((op & 0xf) << 28) | ((cmp & 0xf) << 24) \ | ((oparg & 0xfff) << 12) | (cmparg & 0xfff)) +/* + * Maximum number of multiple futexes to wait for + */ +#define FUTEX_MULTIPLE_MAX_COUNT 128 + +/** + * struct futex_wait_block - Block of futexes to be waited for + * @uaddr: User address of the futex + * @val: Futex value expected by userspace + * @bitset: Bitset for the optional bitmasked wakeup + */ +struct futex_wait_block { + __u32 __user *uaddr; + __u32 val; + __u32 bitset; +}; + #endif /* _UAPI_LINUX_FUTEX_H */ diff --git 
a/include/uapi/linux/vbox_vmmdev_types.h b/include/uapi/linux/vbox_vmmdev_types.h index c27289fd619a..f8a8d6b3c521 100644 --- a/include/uapi/linux/vbox_vmmdev_types.h +++ b/include/uapi/linux/vbox_vmmdev_types.h @@ -63,6 +63,7 @@ enum vmmdev_request_type { VMMDEVREQ_SET_GUEST_CAPABILITIES = 56, VMMDEVREQ_VIDEMODE_SUPPORTED2 = 57, /* since version 3.2.0 */ VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX = 80, /* since version 4.2.4 */ + VMMDEVREQ_GET_DISPLAY_CHANGE_REQ_MULTI = 81, VMMDEVREQ_HGCM_CONNECT = 60, VMMDEVREQ_HGCM_DISCONNECT = 61, VMMDEVREQ_HGCM_CALL32 = 62, @@ -92,6 +93,8 @@ enum vmmdev_request_type { VMMDEVREQ_WRITE_COREDUMP = 218, VMMDEVREQ_GUEST_HEARTBEAT = 219, VMMDEVREQ_HEARTBEAT_CONFIGURE = 220, + VMMDEVREQ_NT_BUG_CHECK = 221, + VMMDEVREQ_VIDEO_UPDATE_MONITOR_POSITIONS = 222, /* Ensure the enum is a 32 bit data-type */ VMMDEVREQ_SIZEHACK = 0x7fffffff }; diff --git a/include/uapi/linux/vboxguest.h b/include/uapi/linux/vboxguest.h index f79d7abe27db..15125f6ec60d 100644 --- a/include/uapi/linux/vboxguest.h +++ b/include/uapi/linux/vboxguest.h @@ -257,6 +257,30 @@ VMMDEV_ASSERT_SIZE(vbg_ioctl_change_filter, 24 + 8); _IOWR('V', 12, struct vbg_ioctl_change_filter) +/** VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES data structure. */ +struct vbg_ioctl_acquire_guest_caps { + /** The header. */ + struct vbg_ioctl_hdr hdr; + union { + struct { + /** Flags (VBGL_IOC_AGC_FLAGS_XXX). */ + __u32 flags; + /** Capabilities to set (VMMDEV_GUEST_SUPPORTS_XXX). */ + __u32 or_mask; + /** Capabilities to drop (VMMDEV_GUEST_SUPPORTS_XXX). */ + __u32 not_mask; + } in; + } u; +}; +VMMDEV_ASSERT_SIZE(vbg_ioctl_acquire_guest_caps, 24 + 12); + +#define VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE 0x00000001 +#define VBGL_IOC_AGC_FLAGS_VALID_MASK 0x00000001 + +#define VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES \ + _IOWR('V', 13, struct vbg_ioctl_acquire_guest_caps) + + /** VBG_IOCTL_CHANGE_GUEST_CAPABILITIES data structure. */ struct vbg_ioctl_set_guest_caps { /** The header. */ diff --git a/init/Kconfig b/init/Kconfig index 0498af567f70..836f8adc6c26 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -191,13 +191,16 @@ config HAVE_KERNEL_LZO config HAVE_KERNEL_LZ4 bool +config HAVE_KERNEL_ZSTD + bool + config HAVE_KERNEL_UNCOMPRESSED bool choice prompt "Kernel compression mode" default KERNEL_GZIP - depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_XZ || HAVE_KERNEL_LZO || HAVE_KERNEL_LZ4 || HAVE_KERNEL_UNCOMPRESSED + depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_XZ || HAVE_KERNEL_LZO || HAVE_KERNEL_LZ4 || HAVE_KERNEL_ZSTD || HAVE_KERNEL_UNCOMPRESSED help The linux kernel is a kind of self-extracting executable. Several compression algorithms are available, which differ @@ -276,6 +279,16 @@ config KERNEL_LZ4 is about 8% bigger than LZO. But the decompression speed is faster than LZO. +config KERNEL_ZSTD + bool "ZSTD" + depends on HAVE_KERNEL_ZSTD + help + ZSTD is a compression algorithm targeting intermediate compression + with fast decompression speed. It will compress better than GZIP and + decompress around the same speed as LZO, but slower than LZ4. You + will need at least 192 KB RAM or more for booting. The zstd command + line tool is required for compression. + config KERNEL_UNCOMPRESSED bool "None" depends on HAVE_KERNEL_UNCOMPRESSED @@ -1140,6 +1153,22 @@ config USER_NS If unsure, say N. 
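
[Editor's note] Given the VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES definition above, a guest session can request exclusive ownership of a capability roughly as sketched below. This assumes the patched uapi headers are installed and the usual vbg_ioctl_hdr initialization (size_in covering the whole request, size_out covering just the header, VBG_IOCTL_HDR_VERSION); verify the header names against your tree before relying on them. A real caller would also inspect req.hdr.rc for the VBox status code.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/vbox_vmmdev_types.h>
    #include <linux/vboxguest.h>

    int main(void)
    {
        struct vbg_ioctl_acquire_guest_caps req;
        int fd = open("/dev/vboxguest", O_RDWR);

        if (fd < 0) {
            perror("open /dev/vboxguest");
            return 1;
        }

        memset(&req, 0, sizeof(req));           /* hdr.type 0 == default request */
        req.hdr.size_in = sizeof(req);
        req.hdr.size_out = sizeof(req.hdr);
        req.hdr.version = VBG_IOCTL_HDR_VERSION;
        req.u.in.flags = 0;
        req.u.in.or_mask = VMMDEV_GUEST_SUPPORTS_SEAMLESS; /* acquire seamless */
        req.u.in.not_mask = 0;

        if (ioctl(fd, VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES, &req) != 0)
            perror("VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES");

        return 0;
    }
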
+config USER_NS_UNPRIVILEGED + bool "Allow unprivileged users to create namespaces" + default y + depends on USER_NS + help + When disabled, unprivileged users will not be able to create + new namespaces. Allowing users to create their own namespaces + has been part of several recent local privilege escalation + exploits, so if you need user namespaces but are + paranoid^Wsecurity-conscious you want to disable this. + + This setting can be overridden at runtime via the + kernel.unprivileged_userns_clone sysctl. + + If unsure, say Y. + config PID_NS bool "PID Namespaces" default y @@ -1278,7 +1307,6 @@ config CC_OPTIMIZE_FOR_PERFORMANCE config CC_OPTIMIZE_FOR_PERFORMANCE_O3 bool "Optimize more for performance (-O3)" - depends on ARC help Choosing this option will pass "-O3" to your compiler to optimize the kernel yet more for performance. diff --git a/kernel/fork.c b/kernel/fork.c index efc5493203ae..0646f384cf54 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -95,6 +95,7 @@ #include #include #include +#include #include #include @@ -1861,6 +1862,10 @@ static __latent_entropy struct task_struct *copy_process( if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) return ERR_PTR(-EINVAL); + if ((clone_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) + if (!capable(CAP_SYS_ADMIN)) + return ERR_PTR(-EPERM); + /* * Thread groups must share signals as well, and detached threads * can only be started up within the thread group. @@ -2961,6 +2966,12 @@ int ksys_unshare(unsigned long unshare_flags) if (unshare_flags & CLONE_NEWNS) unshare_flags |= CLONE_FS; + if ((unshare_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) { + err = -EPERM; + if (!capable(CAP_SYS_ADMIN)) + goto bad_unshare_out; + } + err = check_unshare_flags(unshare_flags); if (err) goto bad_unshare_out; diff --git a/kernel/futex.c b/kernel/futex.c index e646661f6282..d4916ef0e773 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -214,6 +214,8 @@ struct futex_pi_state { * @rt_waiter: rt_waiter storage for use with requeue_pi * @requeue_pi_key: the requeue_pi target futex key * @bitset: bitset for the optional bitmasked wakeup + * @uaddr: userspace address of futex + * @uval: expected futex's value * * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so * we can wake only the relevant ones (hashed queues may be shared). @@ -236,6 +238,8 @@ struct futex_q { struct rt_mutex_waiter *rt_waiter; union futex_key *requeue_pi_key; u32 bitset; + u32 __user *uaddr; + u32 uval; } __randomize_layout; static const struct futex_q futex_q_init = { @@ -2349,6 +2353,29 @@ static int unqueue_me(struct futex_q *q) return ret; } +/** + * unqueue_multiple() - Remove several futexes from their futex_hash_bucket + * @q: The list of futexes to unqueue + * @count: Number of futexes in the list + * + * Helper to unqueue a list of futexes. This can't fail. + * + * Return: + * - >=0 - Index of the last futex that was awoken; + * - -1 - If no futex was awoken + */ +static int unqueue_multiple(struct futex_q *q, int count) +{ + int ret = -1; + int i; + + for (i = 0; i < count; i++) { + if (!unqueue_me(&q[i])) + ret = i; + } + return ret; +} + /* * PI futexes can not be requeued and must remove themself from the * hash bucket. The hash bucket lock (i.e. 
lock_ptr) is held on entry @@ -2712,6 +2739,211 @@ static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags, return ret; } +/** + * futex_wait_multiple_setup() - Prepare to wait and enqueue multiple futexes + * @qs: The corresponding futex list + * @count: The size of the lists + * @flags: Futex flags (FLAGS_SHARED, etc.) + * @awaken: Index of the last awoken futex + * + * Prepare multiple futexes in a single step and enqueue them. This may fail if + * the futex list is invalid or if any futex was already awoken. On success the + * task is ready to interruptible sleep. + * + * Return: + * - 1 - One of the futexes was awaken by another thread + * - 0 - Success + * - <0 - -EFAULT, -EWOULDBLOCK or -EINVAL + */ +static int futex_wait_multiple_setup(struct futex_q *qs, int count, + unsigned int flags, int *awaken) +{ + struct futex_hash_bucket *hb; + int ret, i; + u32 uval; + + /* + * Enqueuing multiple futexes is tricky, because we need to + * enqueue each futex in the list before dealing with the next + * one to avoid deadlocking on the hash bucket. But, before + * enqueuing, we need to make sure that current->state is + * TASK_INTERRUPTIBLE, so we don't absorb any awake events, which + * cannot be done before the get_futex_key of the next key, + * because it calls get_user_pages, which can sleep. Thus, we + * fetch the list of futexes keys in two steps, by first pinning + * all the memory keys in the futex key, and only then we read + * each key and queue the corresponding futex. + */ +retry: + for (i = 0; i < count; i++) { + qs[i].key = FUTEX_KEY_INIT; + ret = get_futex_key(qs[i].uaddr, flags & FLAGS_SHARED, + &qs[i].key, FUTEX_READ); + if (unlikely(ret)) { + for (--i; i >= 0; i--) + put_futex_key(&qs[i].key); + return ret; + } + } + + set_current_state(TASK_INTERRUPTIBLE); + + for (i = 0; i < count; i++) { + struct futex_q *q = &qs[i]; + + hb = queue_lock(q); + + ret = get_futex_value_locked(&uval, q->uaddr); + if (ret) { + /* + * We need to try to handle the fault, which + * cannot be done without sleep, so we need to + * undo all the work already done, to make sure + * we don't miss any wake ups. Therefore, clean + * up, handle the fault and retry from the + * beginning. + */ + queue_unlock(hb); + + /* + * Keys 0..(i-1) are implicitly put + * on unqueue_multiple. + */ + put_futex_key(&q->key); + + *awaken = unqueue_multiple(qs, i); + + __set_current_state(TASK_RUNNING); + + /* + * On a real fault, prioritize the error even if + * some other futex was awoken. Userspace gave + * us a bad address, -EFAULT them. + */ + ret = get_user(uval, q->uaddr); + if (ret) + return ret; + + /* + * Even if the page fault was handled, If + * something was already awaken, we can safely + * give up and succeed to give a hint for userspace to + * acquire the right futex faster. + */ + if (*awaken >= 0) + return 1; + + goto retry; + } + + if (uval != q->uval) { + queue_unlock(hb); + + put_futex_key(&qs[i].key); + + /* + * If something was already awaken, we can + * safely ignore the error and succeed. + */ + *awaken = unqueue_multiple(qs, i); + __set_current_state(TASK_RUNNING); + if (*awaken >= 0) + return 1; + + return -EWOULDBLOCK; + } + + /* + * The bucket lock can't be held while dealing with the + * next futex. Queue each futex at this moment so hb can + * be unlocked. 
+ */ + queue_me(&qs[i], hb); + } + return 0; +} + +/** + * futex_wait_multiple() - Prepare to wait on and enqueue several futexes + * @qs: The list of futexes to wait on + * @op: Operation code from futex's syscall + * @count: The number of objects + * @abs_time: Timeout before giving up and returning to userspace + * + * Entry point for the FUTEX_WAIT_MULTIPLE futex operation, this function + * sleeps on a group of futexes and returns on the first futex that + * triggered, or after the timeout has elapsed. + * + * Return: + * - >=0 - Hint to the futex that was awoken + * - <0 - On error + */ +static int futex_wait_multiple(struct futex_q *qs, int op, + u32 count, ktime_t *abs_time) +{ + struct hrtimer_sleeper timeout, *to; + int ret, flags = 0, hint = 0; + unsigned int i; + + if (!(op & FUTEX_PRIVATE_FLAG)) + flags |= FLAGS_SHARED; + + if (op & FUTEX_CLOCK_REALTIME) + flags |= FLAGS_CLOCKRT; + + to = futex_setup_timer(abs_time, &timeout, flags, 0); + while (1) { + ret = futex_wait_multiple_setup(qs, count, flags, &hint); + if (ret) { + if (ret > 0) { + /* A futex was awaken during setup */ + ret = hint; + } + break; + } + + if (to) + hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS); + + /* + * Avoid sleeping if another thread already tried to + * wake us. + */ + for (i = 0; i < count; i++) { + if (plist_node_empty(&qs[i].list)) + break; + } + + if (i == count && (!to || to->task)) + freezable_schedule(); + + ret = unqueue_multiple(qs, count); + + __set_current_state(TASK_RUNNING); + + if (ret >= 0) + break; + if (to && !to->task) { + ret = -ETIMEDOUT; + break; + } else if (signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } + /* + * The final case is a spurious wakeup, for + * which just retry. + */ + } + + if (to) { + hrtimer_cancel(&to->timer); + destroy_hrtimer_on_stack(&to->timer); + } + + return ret; +} + static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val, ktime_t *abs_time, u32 bitset) { @@ -3836,6 +4068,43 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, return -ENOSYS; } +/** + * futex_read_wait_block - Read an array of futex_wait_block from userspace + * @uaddr: Userspace address of the block + * @count: Number of blocks to be read + * + * This function creates and allocate an array of futex_q (we zero it to + * initialize the fields) and then, for each futex_wait_block element from + * userspace, fill a futex_q element with proper values. 
+ */ +inline struct futex_q *futex_read_wait_block(u32 __user *uaddr, u32 count) +{ + unsigned int i; + struct futex_q *qs; + struct futex_wait_block fwb; + struct futex_wait_block __user *entry = + (struct futex_wait_block __user *)uaddr; + + if (!count || count > FUTEX_MULTIPLE_MAX_COUNT) + return ERR_PTR(-EINVAL); + + qs = kcalloc(count, sizeof(*qs), GFP_KERNEL); + if (!qs) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < count; i++) { + if (copy_from_user(&fwb, &entry[i], sizeof(fwb))) { + kfree(qs); + return ERR_PTR(-EFAULT); + } + + qs[i].uaddr = fwb.uaddr; + qs[i].uval = fwb.val; + qs[i].bitset = fwb.bitset; + } + + return qs; +} SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, struct __kernel_timespec __user *, utime, u32 __user *, uaddr2, @@ -3848,7 +4117,8 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || cmd == FUTEX_WAIT_BITSET || - cmd == FUTEX_WAIT_REQUEUE_PI)) { + cmd == FUTEX_WAIT_REQUEUE_PI || + cmd == FUTEX_WAIT_MULTIPLE)) { if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG)))) return -EFAULT; if (get_timespec64(&ts, utime)) @@ -3857,7 +4127,7 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, return -EINVAL; t = timespec64_to_ktime(ts); - if (cmd == FUTEX_WAIT) + if (cmd == FUTEX_WAIT || cmd == FUTEX_WAIT_MULTIPLE) t = ktime_add_safe(ktime_get(), t); tp = &t; } @@ -3869,6 +4139,25 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP) val2 = (u32) (unsigned long) utime; + if (cmd == FUTEX_WAIT_MULTIPLE) { + int ret; + struct futex_q *qs; + +#ifdef CONFIG_X86_X32 + if (unlikely(in_x32_syscall())) + return -ENOSYS; +#endif + qs = futex_read_wait_block(uaddr, val); + + if (IS_ERR(qs)) + return PTR_ERR(qs); + + ret = futex_wait_multiple(qs, op, val, tp); + kfree(qs); + + return ret; + } + return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); } @@ -4031,6 +4320,58 @@ COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid, #endif /* CONFIG_COMPAT */ #ifdef CONFIG_COMPAT_32BIT_TIME +/** + * struct compat_futex_wait_block - Block of futexes to be waited for + * @uaddr: User address of the futex (compatible pointer) + * @val: Futex value expected by userspace + * @bitset: Bitset for the optional bitmasked wakeup + */ +struct compat_futex_wait_block { + compat_uptr_t uaddr; + __u32 pad; + __u32 val; + __u32 bitset; +}; + +/** + * compat_futex_read_wait_block - Read an array of futex_wait_block from + * userspace + * @uaddr: Userspace address of the block + * @count: Number of blocks to be read + * + * This function does the same as futex_read_wait_block(), except that it + * converts the pointer to the futex from the compat version to the regular one. 
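
[Editor's note] FUTEX_WAIT_MULTIPLE takes a pointer to an array of struct futex_wait_block in uaddr and the number of entries in val; the timeout is relative, exactly as for FUTEX_WAIT. A hedged userspace sketch follows (it assumes the patched <linux/futex.h> is installed; otherwise the opcode and struct must be declared locally). The waker side needs nothing new, it simply issues an ordinary FUTEX_WAKE on whichever futex it wants to signal:

    #include <errno.h>
    #include <linux/futex.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <time.h>
    #include <unistd.h>

    static uint32_t futex_a, futex_b;

    int main(void)
    {
        struct futex_wait_block blocks[2] = {
            { .uaddr = &futex_a, .val = 0, .bitset = FUTEX_BITSET_MATCH_ANY },
            { .uaddr = &futex_b, .val = 0, .bitset = FUTEX_BITSET_MATCH_ANY },
        };
        struct timespec timeout = { .tv_sec = 1 };  /* relative, like FUTEX_WAIT */
        long ret;

        ret = syscall(SYS_futex, blocks,
                      FUTEX_WAIT_MULTIPLE | FUTEX_PRIVATE_FLAG,
                      2, &timeout, NULL, 0);
        if (ret < 0)
            perror("FUTEX_WAIT_MULTIPLE");  /* ETIMEDOUT here, nobody wakes us */
        else
            printf("futex index %ld was woken\n", ret);
        return 0;
    }
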
+ */ +inline struct futex_q *compat_futex_read_wait_block(u32 __user *uaddr, + u32 count) +{ + unsigned int i; + struct futex_q *qs; + struct compat_futex_wait_block fwb; + struct compat_futex_wait_block __user *entry = + (struct compat_futex_wait_block __user *)uaddr; + + if (!count || count > FUTEX_MULTIPLE_MAX_COUNT) + return ERR_PTR(-EINVAL); + + qs = kcalloc(count, sizeof(*qs), GFP_KERNEL); + if (!qs) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < count; i++) { + if (copy_from_user(&fwb, &entry[i], sizeof(fwb))) { + kfree(qs); + return ERR_PTR(-EFAULT); + } + + qs[i].uaddr = compat_ptr(fwb.uaddr); + qs[i].uval = fwb.val; + qs[i].bitset = fwb.bitset; + } + + return qs; +} + SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val, struct old_timespec32 __user *, utime, u32 __user *, uaddr2, u32, val3) @@ -4042,14 +4383,15 @@ SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val, if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || cmd == FUTEX_WAIT_BITSET || - cmd == FUTEX_WAIT_REQUEUE_PI)) { + cmd == FUTEX_WAIT_REQUEUE_PI || + cmd == FUTEX_WAIT_MULTIPLE)) { if (get_old_timespec32(&ts, utime)) return -EFAULT; if (!timespec64_valid(&ts)) return -EINVAL; t = timespec64_to_ktime(ts); - if (cmd == FUTEX_WAIT) + if (cmd == FUTEX_WAIT || cmd == FUTEX_WAIT_MULTIPLE) t = ktime_add_safe(ktime_get(), t); tp = &t; } @@ -4057,6 +4399,19 @@ SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val, cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP) val2 = (int) (unsigned long) utime; + if (cmd == FUTEX_WAIT_MULTIPLE) { + int ret; + struct futex_q *qs = compat_futex_read_wait_block(uaddr, val); + + if (IS_ERR(qs)) + return PTR_ERR(qs); + + ret = futex_wait_multiple(qs, op, val, tp); + kfree(qs); + + return ret; + } + return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); } #endif /* CONFIG_COMPAT_32BIT_TIME */ diff --git a/kernel/pid.c b/kernel/pid.c index f1496b757162..450d40469b1c 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -198,7 +198,7 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid, if (tid != 1 && !tmp->child_reaper) goto out_free; retval = -EPERM; - if (!ns_capable(tmp->user_ns, CAP_SYS_ADMIN)) + if (!checkpoint_restore_ns_capable(tmp->user_ns)) goto out_free; set_tid_size--; } diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index 0e5ac162c3a8..ac135bd600eb 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c @@ -269,7 +269,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write, struct ctl_table tmp = *table; int ret, next; - if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN)) + if (write && !checkpoint_restore_ns_capable(pid_ns->user_ns)) return -EPERM; /* diff --git a/kernel/sys.c b/kernel/sys.c index 00a96746e28a..ca11af9d815d 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -2007,12 +2007,15 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data if (prctl_map.exe_fd != (u32)-1) { /* - * Make sure the caller has the rights to - * change /proc/pid/exe link: only local sys admin should - * be allowed to. + * Check if the current user is checkpoint/restore capable. + * At the time of this writing, it checks for CAP_SYS_ADMIN + * or CAP_CHECKPOINT_RESTORE. + * Note that a user with access to ptrace can masquerade an + * arbitrary program as any executable, even setuid ones. + * This may have implications in the tomoyo subsystem. 
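
[Editor's note] The prctl_set_mm_map() change above is one of several call sites now gated on checkpoint_restore_ns_capable(), i.e. CAP_CHECKPOINT_RESTORE or CAP_SYS_ADMIN. A small sketch for probing whether the running kernel knows the new capability number at all, using PR_CAPBSET_READ (which fails with EINVAL for capabilities the kernel does not define):

    #include <errno.h>
    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef CAP_CHECKPOINT_RESTORE
    #define CAP_CHECKPOINT_RESTORE 40   /* from the uapi change above */
    #endif

    int main(void)
    {
        int ret = prctl(PR_CAPBSET_READ, CAP_CHECKPOINT_RESTORE, 0, 0, 0);

        if (ret < 0 && errno == EINVAL)
            printf("kernel predates CAP_CHECKPOINT_RESTORE\n");
        else
            printf("CAP_CHECKPOINT_RESTORE is %sin the bounding set\n",
                   ret == 1 ? "" : "not ");
        return 0;
    }
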
*/ - if (!ns_capable(current_user_ns(), CAP_SYS_ADMIN)) - return -EINVAL; + if (!checkpoint_restore_ns_capable(current_user_ns())) + return -EPERM; error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd); if (error) diff --git a/kernel/sysctl.c b/kernel/sysctl.c index db1ce7af2563..26eae8e5585c 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -71,6 +71,7 @@ #include #include #include +#include #include "../lib/kstrtox.h" @@ -1882,6 +1883,15 @@ static struct ctl_table kern_table[] = { .proc_handler = proc_dointvec, }, #endif +#ifdef CONFIG_USER_NS + { + .procname = "unprivileged_userns_clone", + .data = &unprivileged_userns_clone, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, +#endif #ifdef CONFIG_PROC_SYSCTL { .procname = "tainted", @@ -2830,6 +2840,15 @@ static struct ctl_table vm_table[] = { .mode = 0200, .proc_handler = sysctl_compaction_handler, }, + { + .procname = "compaction_proactiveness", + .data = &sysctl_compaction_proactiveness, + .maxlen = sizeof(sysctl_compaction_proactiveness), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = &one_hundred, + }, { .procname = "extfrag_threshold", .data = &sysctl_extfrag_threshold, diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index 87804e0371fe..66b5afb0d0ee 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -21,6 +21,13 @@ #include #include +/* sysctl */ +#ifdef CONFIG_USER_NS_UNPRIVILEGED +int unprivileged_userns_clone = 1; +#else +int unprivileged_userns_clone; +#endif + static struct kmem_cache *user_ns_cachep __read_mostly; static DEFINE_MUTEX(userns_state_mutex); diff --git a/lib/Kconfig b/lib/Kconfig index df3f3da95990..a5d6f23c4cab 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -342,6 +342,10 @@ config DECOMPRESS_LZ4 select LZ4_DECOMPRESS tristate +config DECOMPRESS_ZSTD + select ZSTD_DECOMPRESS + tristate + # # Generic allocator support is selected if needed # diff --git a/lib/Makefile b/lib/Makefile index b1c42c10073b..2ba9642a3a87 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -170,6 +170,7 @@ lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o lib-$(CONFIG_DECOMPRESS_XZ) += decompress_unxz.o lib-$(CONFIG_DECOMPRESS_LZO) += decompress_unlzo.o lib-$(CONFIG_DECOMPRESS_LZ4) += decompress_unlz4.o +lib-$(CONFIG_DECOMPRESS_ZSTD) += decompress_unzstd.o obj-$(CONFIG_TEXTSEARCH) += textsearch.o obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o diff --git a/lib/decompress.c b/lib/decompress.c index 857ab1af1ef3..ab3fc90ffc64 100644 --- a/lib/decompress.c +++ b/lib/decompress.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -37,6 +38,9 @@ #ifndef CONFIG_DECOMPRESS_LZ4 # define unlz4 NULL #endif +#ifndef CONFIG_DECOMPRESS_ZSTD +# define unzstd NULL +#endif struct compress_format { unsigned char magic[2]; @@ -52,6 +56,7 @@ static const struct compress_format compressed_formats[] __initconst = { { {0xfd, 0x37}, "xz", unxz }, { {0x89, 0x4c}, "lzo", unlzo }, { {0x02, 0x21}, "lz4", unlz4 }, + { {0x28, 0xb5}, "zstd", unzstd }, { {0, 0}, NULL, NULL } }; diff --git a/lib/decompress_unzstd.c b/lib/decompress_unzstd.c new file mode 100644 index 000000000000..0ad2c15479ed --- /dev/null +++ b/lib/decompress_unzstd.c @@ -0,0 +1,345 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Important notes about in-place decompression + * + * At least on x86, the kernel is decompressed in place: the compressed data + * is placed to the end of the output buffer, and the decompressor overwrites + * most of the compressed data. 
There must be enough safety margin to + * guarantee that the write position is always behind the read position. + * + * The safety margin for ZSTD with a 128 KB block size is calculated below. + * Note that the margin with ZSTD is bigger than with GZIP or XZ! + * + * The worst case for in-place decompression is that the beginning of + * the file is compressed extremely well, and the rest of the file is + * uncompressible. Thus, we must look for worst-case expansion when the + * compressor is encoding uncompressible data. + * + * The structure of the .zst file in case of a compresed kernel is as follows. + * Maximum sizes (as bytes) of the fields are in parenthesis. + * + * Frame Header: (18) + * Blocks: (N) + * Checksum: (4) + * + * The frame header and checksum overhead is at most 22 bytes. + * + * ZSTD stores the data in blocks. Each block has a header whose size is + * a 3 bytes. After the block header, there is up to 128 KB of payload. + * The maximum uncompressed size of the payload is 128 KB. The minimum + * uncompressed size of the payload is never less than the payload size + * (excluding the block header). + * + * The assumption, that the uncompressed size of the payload is never + * smaller than the payload itself, is valid only when talking about + * the payload as a whole. It is possible that the payload has parts where + * the decompressor consumes more input than it produces output. Calculating + * the worst case for this would be tricky. Instead of trying to do that, + * let's simply make sure that the decompressor never overwrites any bytes + * of the payload which it is currently reading. + * + * Now we have enough information to calculate the safety margin. We need + * - 22 bytes for the .zst file format headers; + * - 3 bytes per every 128 KiB of uncompressed size (one block header per + * block); and + * - 128 KiB (biggest possible zstd block size) to make sure that the + * decompressor never overwrites anything from the block it is currently + * reading. + * + * We get the following formula: + * + * safety_margin = 22 + uncompressed_size * 3 / 131072 + 131072 + * <= 22 + (uncompressed_size >> 15) + 131072 + */ + +/* + * Preboot environments #include "path/to/decompress_unzstd.c". + * All of the source files we depend on must be #included. + * zstd's only source dependeny is xxhash, which has no source + * dependencies. + * + * When UNZSTD_PREBOOT is defined we declare __decompress(), which is + * used for kernel decompression, instead of unzstd(). + * + * Define __DISABLE_EXPORTS in preboot environments to prevent symbols + * from xxhash and zstd from being exported by the EXPORT_SYMBOL macro. + */ +#ifdef STATIC +# define UNZSTD_PREBOOT +# include "xxhash.c" +# include "zstd/entropy_common.c" +# include "zstd/fse_decompress.c" +# include "zstd/huf_decompress.c" +# include "zstd/zstd_common.c" +# include "zstd/decompress.c" +#endif + +#include +#include +#include + +/* 128MB is the maximum window size supported by zstd. */ +#define ZSTD_WINDOWSIZE_MAX (1 << ZSTD_WINDOWLOG_MAX) +/* + * Size of the input and output buffers in multi-call mode. + * Pick a larger size because it isn't used during kernel decompression, + * since that is single pass, and we have to allocate a large buffer for + * zstd's window anyway. The larger size speeds up initramfs decompression. 
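Taken together, the derivation above collapses to a single bound. A small sketch of that bound as a helper (the name zstd_inplace_margin is purely illustrative and not defined in this file):

/*
 * 22 bytes of frame header and checksum overhead, at most one 3-byte
 * block header per 128 KiB of output (bounded above by size >> 15),
 * plus one full 128 KiB block so the write position always stays
 * behind the read position.
 */
static unsigned long zstd_inplace_margin(unsigned long uncompressed_size)
{
	return 22 + (uncompressed_size >> 15) + (128 * 1024);
}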
+ */ +#define ZSTD_IOBUF_SIZE (1 << 17) + +static int INIT handle_zstd_error(size_t ret, void (*error)(char *x)) +{ + const int err = ZSTD_getErrorCode(ret); + + if (!ZSTD_isError(ret)) + return 0; + + switch (err) { + case ZSTD_error_memory_allocation: + error("ZSTD decompressor ran out of memory"); + break; + case ZSTD_error_prefix_unknown: + error("Input is not in the ZSTD format (wrong magic bytes)"); + break; + case ZSTD_error_dstSize_tooSmall: + case ZSTD_error_corruption_detected: + case ZSTD_error_checksum_wrong: + error("ZSTD-compressed data is corrupt"); + break; + default: + error("ZSTD-compressed data is probably corrupt"); + break; + } + return -1; +} + +/* + * Handle the case where we have the entire input and output in one segment. + * We can allocate less memory (no circular buffer for the sliding window), + * and avoid some memcpy() calls. + */ +static int INIT decompress_single(const u8 *in_buf, long in_len, u8 *out_buf, + long out_len, long *in_pos, + void (*error)(char *x)) +{ + const size_t wksp_size = ZSTD_DCtxWorkspaceBound(); + void *wksp = large_malloc(wksp_size); + ZSTD_DCtx *dctx = ZSTD_initDCtx(wksp, wksp_size); + int err; + size_t ret; + + if (dctx == NULL) { + error("Out of memory while allocating ZSTD_DCtx"); + err = -1; + goto out; + } + /* + * Find out how large the frame actually is, there may be junk at + * the end of the frame that ZSTD_decompressDCtx() can't handle. + */ + ret = ZSTD_findFrameCompressedSize(in_buf, in_len); + err = handle_zstd_error(ret, error); + if (err) + goto out; + in_len = (long)ret; + + ret = ZSTD_decompressDCtx(dctx, out_buf, out_len, in_buf, in_len); + err = handle_zstd_error(ret, error); + if (err) + goto out; + + if (in_pos != NULL) + *in_pos = in_len; + + err = 0; +out: + if (wksp != NULL) + large_free(wksp); + return err; +} + +static int INIT __unzstd(unsigned char *in_buf, long in_len, + long (*fill)(void*, unsigned long), + long (*flush)(void*, unsigned long), + unsigned char *out_buf, long out_len, + long *in_pos, + void (*error)(char *x)) +{ + ZSTD_inBuffer in; + ZSTD_outBuffer out; + ZSTD_frameParams params; + void *in_allocated = NULL; + void *out_allocated = NULL; + void *wksp = NULL; + size_t wksp_size; + ZSTD_DStream *dstream; + int err; + size_t ret; + + if (out_len == 0) + out_len = LONG_MAX; /* no limit */ + + if (fill == NULL && flush == NULL) + /* + * We can decompress faster and with less memory when we have a + * single chunk. + */ + return decompress_single(in_buf, in_len, out_buf, out_len, + in_pos, error); + + /* + * If in_buf is not provided, we must be using fill(), so allocate + * a large enough buffer. If it is provided, it must be at least + * ZSTD_IOBUF_SIZE large. + */ + if (in_buf == NULL) { + in_allocated = large_malloc(ZSTD_IOBUF_SIZE); + if (in_allocated == NULL) { + error("Out of memory while allocating input buffer"); + err = -1; + goto out; + } + in_buf = in_allocated; + in_len = 0; + } + /* Read the first chunk, since we need to decode the frame header. */ + if (fill != NULL) + in_len = fill(in_buf, ZSTD_IOBUF_SIZE); + if (in_len < 0) { + error("ZSTD-compressed data is truncated"); + err = -1; + goto out; + } + /* Set the first non-empty input buffer. */ + in.src = in_buf; + in.pos = 0; + in.size = in_len; + /* Allocate the output buffer if we are using flush(). 
*/ + if (flush != NULL) { + out_allocated = large_malloc(ZSTD_IOBUF_SIZE); + if (out_allocated == NULL) { + error("Out of memory while allocating output buffer"); + err = -1; + goto out; + } + out_buf = out_allocated; + out_len = ZSTD_IOBUF_SIZE; + } + /* Set the output buffer. */ + out.dst = out_buf; + out.pos = 0; + out.size = out_len; + + /* + * We need to know the window size to allocate the ZSTD_DStream. + * Since we are streaming, we need to allocate a buffer for the sliding + * window. The window size varies from 1 KB to ZSTD_WINDOWSIZE_MAX + * (8 MB), so it is important to use the actual value so as not to + * waste memory when it is smaller. + */ + ret = ZSTD_getFrameParams(¶ms, in.src, in.size); + err = handle_zstd_error(ret, error); + if (err) + goto out; + if (ret != 0) { + error("ZSTD-compressed data has an incomplete frame header"); + err = -1; + goto out; + } + if (params.windowSize > ZSTD_WINDOWSIZE_MAX) { + error("ZSTD-compressed data has too large a window size"); + err = -1; + goto out; + } + + /* + * Allocate the ZSTD_DStream now that we know how much memory is + * required. + */ + wksp_size = ZSTD_DStreamWorkspaceBound(params.windowSize); + wksp = large_malloc(wksp_size); + dstream = ZSTD_initDStream(params.windowSize, wksp, wksp_size); + if (dstream == NULL) { + error("Out of memory while allocating ZSTD_DStream"); + err = -1; + goto out; + } + + /* + * Decompression loop: + * Read more data if necessary (error if no more data can be read). + * Call the decompression function, which returns 0 when finished. + * Flush any data produced if using flush(). + */ + if (in_pos != NULL) + *in_pos = 0; + do { + /* + * If we need to reload data, either we have fill() and can + * try to get more data, or we don't and the input is truncated. + */ + if (in.pos == in.size) { + if (in_pos != NULL) + *in_pos += in.pos; + in_len = fill ? fill(in_buf, ZSTD_IOBUF_SIZE) : -1; + if (in_len < 0) { + error("ZSTD-compressed data is truncated"); + err = -1; + goto out; + } + in.pos = 0; + in.size = in_len; + } + /* Returns zero when the frame is complete. */ + ret = ZSTD_decompressStream(dstream, &out, &in); + err = handle_zstd_error(ret, error); + if (err) + goto out; + /* Flush all of the data produced if using flush(). 
*/ + if (flush != NULL && out.pos > 0) { + if (out.pos != flush(out.dst, out.pos)) { + error("Failed to flush()"); + err = -1; + goto out; + } + out.pos = 0; + } + } while (ret != 0); + + if (in_pos != NULL) + *in_pos += in.pos; + + err = 0; +out: + if (in_allocated != NULL) + large_free(in_allocated); + if (out_allocated != NULL) + large_free(out_allocated); + if (wksp != NULL) + large_free(wksp); + return err; +} + +#ifndef UNZSTD_PREBOOT +STATIC int INIT unzstd(unsigned char *buf, long len, + long (*fill)(void*, unsigned long), + long (*flush)(void*, unsigned long), + unsigned char *out_buf, + long *pos, + void (*error)(char *x)) +{ + return __unzstd(buf, len, fill, flush, out_buf, 0, pos, error); +} +#else +STATIC int INIT __decompress(unsigned char *buf, long len, + long (*fill)(void*, unsigned long), + long (*flush)(void*, unsigned long), + unsigned char *out_buf, long out_len, + long *pos, + void (*error)(char *x)) +{ + return __unzstd(buf, len, fill, flush, out_buf, out_len, pos, error); +} +#endif diff --git a/lib/zstd/fse_decompress.c b/lib/zstd/fse_decompress.c index a84300e5a013..0b353530fb3f 100644 --- a/lib/zstd/fse_decompress.c +++ b/lib/zstd/fse_decompress.c @@ -47,6 +47,7 @@ ****************************************************************/ #include "bitstream.h" #include "fse.h" +#include "zstd_internal.h" #include #include #include /* memcpy, memset */ @@ -60,14 +61,6 @@ enum { FSE_static_assert = 1 / (int)(!!(c)) }; \ } /* use only *after* variable declarations */ -/* check and forward error code */ -#define CHECK_F(f) \ - { \ - size_t const e = f; \ - if (FSE_isError(e)) \ - return e; \ - } - /* ************************************************************** * Templates ****************************************************************/ diff --git a/lib/zstd/zstd_internal.h b/lib/zstd/zstd_internal.h index 1a79fab9e13a..dac753397f86 100644 --- a/lib/zstd/zstd_internal.h +++ b/lib/zstd/zstd_internal.h @@ -127,7 +127,14 @@ static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG; * Shared functions to include for inlining *********************************************/ ZSTD_STATIC void ZSTD_copy8(void *dst, const void *src) { - memcpy(dst, src, 8); + /* + * zstd relies heavily on gcc being able to analyze and inline this + * memcpy() call, since it is called in a tight loop. Preboot mode + * is compiled in freestanding mode, which stops gcc from analyzing + * memcpy(). Use __builtin_memcpy() to tell gcc to analyze this as a + * regular memcpy(). + */ + __builtin_memcpy(dst, src, 8); } /*! ZSTD_wildcopy() : * custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */ @@ -137,13 +144,16 @@ ZSTD_STATIC void ZSTD_wildcopy(void *dst, const void *src, ptrdiff_t length) const BYTE* ip = (const BYTE*)src; BYTE* op = (BYTE*)dst; BYTE* const oend = op + length; - /* Work around https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81388. +#if defined(GCC_VERSION) && GCC_VERSION >= 70000 && GCC_VERSION < 70200 + /* + * Work around https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81388. * Avoid the bad case where the loop only runs once by handling the * special case separately. This doesn't trigger the bug because it * doesn't involve pointer/integer overflow. 
*/ if (length <= 8) return ZSTD_copy8(dst, src); +#endif do { ZSTD_copy8(op, ip); op += 8; diff --git a/mm/compaction.c b/mm/compaction.c index 86375605faa9..b7d433f1706a 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -50,6 +50,24 @@ static inline void count_compact_events(enum vm_event_item item, long delta) #define pageblock_start_pfn(pfn) block_start_pfn(pfn, pageblock_order) #define pageblock_end_pfn(pfn) block_end_pfn(pfn, pageblock_order) +/* + * Fragmentation score check interval for proactive compaction purposes. + */ +static const unsigned int HPAGE_FRAG_CHECK_INTERVAL_MSEC = 500; + +/* + * Page order with-respect-to which proactive compaction + * calculates external fragmentation, which is used as + * the "fragmentation score" of a node/zone. + */ +#if defined CONFIG_TRANSPARENT_HUGEPAGE +#define COMPACTION_HPAGE_ORDER HPAGE_PMD_ORDER +#elif defined CONFIG_HUGETLBFS +#define COMPACTION_HPAGE_ORDER HUGETLB_PAGE_ORDER +#else +#define COMPACTION_HPAGE_ORDER (PMD_SHIFT - PAGE_SHIFT) +#endif + static unsigned long release_freepages(struct list_head *freelist) { struct page *page, *next; @@ -1857,6 +1875,76 @@ static inline bool is_via_compact_memory(int order) return order == -1; } +static bool kswapd_is_running(pg_data_t *pgdat) +{ + return pgdat->kswapd && (pgdat->kswapd->state == TASK_RUNNING); +} + +/* + * A zone's fragmentation score is the external fragmentation wrt to the + * COMPACTION_HPAGE_ORDER scaled by the zone's size. It returns a value + * in the range [0, 100]. + * + * The scaling factor ensures that proactive compaction focuses on larger + * zones like ZONE_NORMAL, rather than smaller, specialized zones like + * ZONE_DMA32. For smaller zones, the score value remains close to zero, + * and thus never exceeds the high threshold for proactive compaction. + */ +static unsigned int fragmentation_score_zone(struct zone *zone) +{ + unsigned long score; + + score = zone->present_pages * + extfrag_for_order(zone, COMPACTION_HPAGE_ORDER); + return div64_ul(score, zone->zone_pgdat->node_present_pages + 1); +} + +/* + * The per-node proactive (background) compaction process is started by its + * corresponding kcompactd thread when the node's fragmentation score + * exceeds the high threshold. The compaction process remains active till + * the node's score falls below the low threshold, or one of the back-off + * conditions is met. + */ +static unsigned int fragmentation_score_node(pg_data_t *pgdat) +{ + unsigned int score = 0; + int zoneid; + + for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { + struct zone *zone; + + zone = &pgdat->node_zones[zoneid]; + score += fragmentation_score_zone(zone); + } + + return score; +} + +static unsigned int fragmentation_score_wmark(pg_data_t *pgdat, bool low) +{ + unsigned int wmark_low; + + /* + * Cap the low watermak to avoid excessive compaction + * activity in case a user sets the proactivess tunable + * close to 100 (maximum). + */ + wmark_low = max(100U - sysctl_compaction_proactiveness, 5U); + return low ? 
wmark_low : min(wmark_low + 10, 100U); +} + +static bool should_proactive_compact_node(pg_data_t *pgdat) +{ + int wmark_high; + + if (!sysctl_compaction_proactiveness || kswapd_is_running(pgdat)) + return false; + + wmark_high = fragmentation_score_wmark(pgdat, false); + return fragmentation_score_node(pgdat) > wmark_high; +} + static enum compact_result __compact_finished(struct compact_control *cc) { unsigned int order; @@ -1883,6 +1971,25 @@ static enum compact_result __compact_finished(struct compact_control *cc) return COMPACT_PARTIAL_SKIPPED; } + if (cc->proactive_compaction) { + int score, wmark_low; + pg_data_t *pgdat; + + pgdat = cc->zone->zone_pgdat; + if (kswapd_is_running(pgdat)) + return COMPACT_PARTIAL_SKIPPED; + + score = fragmentation_score_zone(cc->zone); + wmark_low = fragmentation_score_wmark(pgdat, true); + + if (score > wmark_low) + ret = COMPACT_CONTINUE; + else + ret = COMPACT_SUCCESS; + + goto out; + } + if (is_via_compact_memory(cc->order)) return COMPACT_CONTINUE; @@ -1941,6 +2048,7 @@ static enum compact_result __compact_finished(struct compact_control *cc) } } +out: if (cc->contended || fatal_signal_pending(current)) ret = COMPACT_CONTENDED; @@ -2421,6 +2529,41 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, return rc; } +/* + * Compact all zones within a node till each zone's fragmentation score + * reaches within proactive compaction thresholds (as determined by the + * proactiveness tunable). + * + * It is possible that the function returns before reaching score targets + * due to various back-off conditions, such as, contention on per-node or + * per-zone locks. + */ +static void proactive_compact_node(pg_data_t *pgdat) +{ + int zoneid; + struct zone *zone; + struct compact_control cc = { + .order = -1, + .mode = MIGRATE_SYNC_LIGHT, + .ignore_skip_hint = true, + .whole_zone = true, + .gfp_mask = GFP_KERNEL, + .proactive_compaction = true, + }; + + for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { + zone = &pgdat->node_zones[zoneid]; + if (!populated_zone(zone)) + continue; + + cc.zone = zone; + + compact_zone(&cc, NULL); + + VM_BUG_ON(!list_empty(&cc.freepages)); + VM_BUG_ON(!list_empty(&cc.migratepages)); + } +} /* Compact all zones within a node */ static void compact_node(int nid) @@ -2467,6 +2610,13 @@ static void compact_nodes(void) /* The written value is actually unused, all memory is compacted */ int sysctl_compact_memory; +/* + * Tunable for proactive compaction. It determines how + * aggressively the kernel should compact memory in the + * background. It takes values in the range [0, 100]. 
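To make the thresholds concrete: with the default proactiveness of 20, fragmentation_score_wmark() above gives wmark_low = max(100 - 20, 5) = 80 and wmark_high = min(80 + 10, 100) = 90, so kcompactd begins proactive compaction once a node's fragmentation score exceeds 90 and keeps going until it falls below 80, unless a back-off condition (such as kswapd running on that node) stops it earlier. Because fragmentation_score_zone() scales each zone's external fragmentation by its share of the node's pages, a large ZONE_NORMAL dominates the node score while a small ZONE_DMA32 contributes almost nothing. The tunable itself can be changed at runtime through /proc/sys/vm/compaction_proactiveness.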
+ */ +unsigned int __read_mostly sysctl_compaction_proactiveness = 20; + /* * This is the entry point for compacting all nodes via * /proc/sys/vm/compact_memory @@ -2646,6 +2796,7 @@ static int kcompactd(void *p) { pg_data_t *pgdat = (pg_data_t*)p; struct task_struct *tsk = current; + unsigned int proactive_defer = 0; const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); @@ -2661,12 +2812,34 @@ static int kcompactd(void *p) unsigned long pflags; trace_mm_compaction_kcompactd_sleep(pgdat->node_id); - wait_event_freezable(pgdat->kcompactd_wait, - kcompactd_work_requested(pgdat)); + if (wait_event_freezable_timeout(pgdat->kcompactd_wait, + kcompactd_work_requested(pgdat), + msecs_to_jiffies(HPAGE_FRAG_CHECK_INTERVAL_MSEC))) { + + psi_memstall_enter(&pflags); + kcompactd_do_work(pgdat); + psi_memstall_leave(&pflags); + continue; + } - psi_memstall_enter(&pflags); - kcompactd_do_work(pgdat); - psi_memstall_leave(&pflags); + /* kcompactd wait timeout */ + if (should_proactive_compact_node(pgdat)) { + unsigned int prev_score, score; + + if (proactive_defer) { + proactive_defer--; + continue; + } + prev_score = fragmentation_score_node(pgdat); + proactive_compact_node(pgdat); + score = fragmentation_score_node(pgdat); + /* + * Defer proactive compaction if the fragmentation + * score did not go down i.e. no progress made. + */ + proactive_defer = score < prev_score ? + 0 : 1 << COMPACT_MAX_DEFER_SHIFT; + } } return 0; diff --git a/mm/internal.h b/mm/internal.h index 9886db20d94f..42cf0b610847 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -239,6 +239,7 @@ struct compact_control { bool no_set_skip_hint; /* Don't mark blocks for skipping */ bool ignore_block_suitable; /* Scan blocks considered unsuitable */ bool direct_compaction; /* False from kcompactd or /proc/... */ + bool proactive_compaction; /* kcompactd proactive compaction */ bool whole_zone; /* Whole zone should/has been scanned */ bool contended; /* Signal lock or sched contention */ bool rescan; /* Rescanning the same pageblock */ diff --git a/mm/ksm.c b/mm/ksm.c index 4102034cd55a..61bdad624f18 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2433,54 +2433,78 @@ static int ksm_scan_thread(void *nothing) return 0; } -int ksm_madvise(struct vm_area_struct *vma, unsigned long start, - unsigned long end, int advice, unsigned long *vm_flags) +int ksm_madvise_merge(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long *vm_flags) { - struct mm_struct *mm = vma->vm_mm; int err; - switch (advice) { - case MADV_MERGEABLE: - /* - * Be somewhat over-protective for now! - */ - if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE | - VM_PFNMAP | VM_IO | VM_DONTEXPAND | - VM_HUGETLB | VM_MIXEDMAP)) - return 0; /* just ignore the advice */ + /* + * Be somewhat over-protective for now! 
+ */ + if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE | + VM_PFNMAP | VM_IO | VM_DONTEXPAND | + VM_HUGETLB | VM_MIXEDMAP)) + return 0; /* just ignore the advice */ - if (vma_is_dax(vma)) - return 0; + if (vma_is_dax(vma)) + return 0; #ifdef VM_SAO - if (*vm_flags & VM_SAO) - return 0; + if (*vm_flags & VM_SAO) + return 0; #endif #ifdef VM_SPARC_ADI - if (*vm_flags & VM_SPARC_ADI) - return 0; + if (*vm_flags & VM_SPARC_ADI) + return 0; #endif - if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { - err = __ksm_enter(mm); - if (err) - return err; - } + if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { + err = __ksm_enter(mm); + if (err) + return err; + } - *vm_flags |= VM_MERGEABLE; - break; + *vm_flags |= VM_MERGEABLE; - case MADV_UNMERGEABLE: - if (!(*vm_flags & VM_MERGEABLE)) - return 0; /* just ignore the advice */ + return 0; +} - if (vma->anon_vma) { - err = unmerge_ksm_pages(vma, start, end); - if (err) - return err; - } +int ksm_madvise_unmerge(struct vm_area_struct *vma, unsigned long start, + unsigned long end, unsigned long *vm_flags) +{ + int err; + + if (!(*vm_flags & VM_MERGEABLE)) + return 0; /* just ignore the advice */ + + if (vma->anon_vma) { + err = unmerge_ksm_pages(vma, start, end); + if (err) + return err; + } + + *vm_flags &= ~VM_MERGEABLE; + + return 0; +} + +int ksm_madvise(struct vm_area_struct *vma, unsigned long start, + unsigned long end, int advice, unsigned long *vm_flags) +{ + struct mm_struct *mm = vma->vm_mm; + int err; - *vm_flags &= ~VM_MERGEABLE; + switch (advice) { + case MADV_MERGEABLE: + err = ksm_madvise_merge(mm, vma, vm_flags); + if (err) + return err; + break; + + case MADV_UNMERGEABLE: + err = ksm_madvise_unmerge(vma, start, end, vm_flags); + if (err) + return err; break; } diff --git a/mm/vmstat.c b/mm/vmstat.c index 3fb23a21f6dd..b1de695b826d 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1074,6 +1074,24 @@ static int __fragmentation_index(unsigned int order, struct contig_page_info *in return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total); } +/* + * Calculates external fragmentation within a zone wrt the given order. + * It is defined as the percentage of pages found in blocks of size + * less than 1 << order. It returns values in range [0, 100]. + */ +unsigned int extfrag_for_order(struct zone *zone, unsigned int order) +{ + struct contig_page_info info; + + fill_contig_page_info(zone, order, &info); + if (info.free_pages == 0) + return 0; + + return div_u64((info.free_pages - + (info.free_blocks_suitable << order)) * 100, + info.free_pages); +} + /* Same as __fragmentation index but allocs contig_page_info on stack */ int fragmentation_index(struct zone *zone, unsigned int order) { diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 916b2f7f7098..54f7b7eb580b 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -413,6 +413,28 @@ quiet_cmd_xzkern = XZKERN $@ quiet_cmd_xzmisc = XZMISC $@ cmd_xzmisc = cat $(real-prereqs) | $(XZ) --check=crc32 --lzma2=dict=1MiB > $@ +# ZSTD +# --------------------------------------------------------------------------- +# Appends the uncompressed size of the data using size_append. The .zst +# format has the size information available at the beginning of the file too, +# but it's in a more complex format and it's good to avoid changing the part +# of the boot code that reads the uncompressed size. +# +# Note that the bytes added by size_append will make the zstd tool think that +# the file is corrupt. This is expected. 
+# +# zstd uses a maximum window size of 8 MB. zstd22 uses a maximum window size of +# 128 MB. zstd22 is used for kernel compression because it is decompressed in a +# single pass, so zstd doesn't need to allocate a window buffer. When streaming +# decompression is used, like initramfs decompression, zstd22 should likely not +# be used because it would require zstd to allocate a 128 MB buffer. + +quiet_cmd_zstd = ZSTD $@ + cmd_zstd = { cat $(real-prereqs) | $(ZSTD) -19; $(size_append); } > $@ + +quiet_cmd_zstd22 = ZSTD22 $@ + cmd_zstd22 = { cat $(real-prereqs) | $(ZSTD) -22 --ultra; $(size_append); } > $@ + # ASM offsets # --------------------------------------------------------------------------- diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h index 98e1513b608a..40cebde62856 100644 --- a/security/selinux/include/classmap.h +++ b/security/selinux/include/classmap.h @@ -27,9 +27,10 @@ "audit_control", "setfcap" #define COMMON_CAP2_PERMS "mac_override", "mac_admin", "syslog", \ - "wake_alarm", "block_suspend", "audit_read", "perfmon", "bpf" + "wake_alarm", "block_suspend", "audit_read", "perfmon", "bpf", \ + "checkpoint_restore" -#if CAP_LAST_CAP > CAP_BPF +#if CAP_LAST_CAP > CAP_CHECKPOINT_RESTORE #error New capability defined, please update COMMON_CAP2_PERMS. #endif diff --git a/tools/testing/selftests/clone3/.gitignore b/tools/testing/selftests/clone3/.gitignore index a81085742d40..83c0f6246055 100644 --- a/tools/testing/selftests/clone3/.gitignore +++ b/tools/testing/selftests/clone3/.gitignore @@ -2,3 +2,4 @@ clone3 clone3_clear_sighand clone3_set_tid +clone3_cap_checkpoint_restore diff --git a/tools/testing/selftests/clone3/Makefile b/tools/testing/selftests/clone3/Makefile index cf976c732906..ef7564cb7abe 100644 --- a/tools/testing/selftests/clone3/Makefile +++ b/tools/testing/selftests/clone3/Makefile @@ -1,6 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 CFLAGS += -g -I../../../../usr/include/ +LDLIBS += -lcap -TEST_GEN_PROGS := clone3 clone3_clear_sighand clone3_set_tid +TEST_GEN_PROGS := clone3 clone3_clear_sighand clone3_set_tid \ + clone3_cap_checkpoint_restore include ../lib.mk diff --git a/tools/testing/selftests/clone3/clone3_cap_checkpoint_restore.c b/tools/testing/selftests/clone3/clone3_cap_checkpoint_restore.c new file mode 100644 index 000000000000..9562425aa0a9 --- /dev/null +++ b/tools/testing/selftests/clone3/clone3_cap_checkpoint_restore.c @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Based on Christian Brauner's clone3() example. + * These tests are assuming to be running in the host's + * PID namespace. 
+ */ + +/* capabilities related code based on selftests/bpf/test_verifier.c */ + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../kselftest_harness.h" +#include "clone3_selftests.h" + +#ifndef MAX_PID_NS_LEVEL +#define MAX_PID_NS_LEVEL 32 +#endif + +static void child_exit(int ret) +{ + fflush(stdout); + fflush(stderr); + _exit(ret); +} + +static int call_clone3_set_tid(struct __test_metadata *_metadata, + pid_t *set_tid, size_t set_tid_size) +{ + int status; + pid_t pid = -1; + + struct clone_args args = { + .exit_signal = SIGCHLD, + .set_tid = ptr_to_u64(set_tid), + .set_tid_size = set_tid_size, + }; + + pid = sys_clone3(&args, sizeof(struct clone_args)); + if (pid < 0) { + TH_LOG("%s - Failed to create new process", strerror(errno)); + return -errno; + } + + if (pid == 0) { + int ret; + char tmp = 0; + + TH_LOG("I am the child, my PID is %d (expected %d)", getpid(), set_tid[0]); + + if (set_tid[0] != getpid()) + child_exit(EXIT_FAILURE); + child_exit(EXIT_SUCCESS); + } + + TH_LOG("I am the parent (%d). My child's pid is %d", getpid(), pid); + + if (waitpid(pid, &status, 0) < 0) { + TH_LOG("Child returned %s", strerror(errno)); + return -errno; + } + + if (!WIFEXITED(status)) + return -1; + + return WEXITSTATUS(status); +} + +static int test_clone3_set_tid(struct __test_metadata *_metadata, + pid_t *set_tid, size_t set_tid_size) +{ + int ret; + + TH_LOG("[%d] Trying clone3() with CLONE_SET_TID to %d", getpid(), set_tid[0]); + ret = call_clone3_set_tid(_metadata, set_tid, set_tid_size); + TH_LOG("[%d] clone3() with CLONE_SET_TID %d says:%d", getpid(), set_tid[0], ret); + return ret; +} + +struct libcap { + struct __user_cap_header_struct hdr; + struct __user_cap_data_struct data[2]; +}; + +static int set_capability(void) +{ + cap_value_t cap_values[] = { CAP_SETUID, CAP_SETGID }; + struct libcap *cap; + int ret = -1; + cap_t caps; + + caps = cap_get_proc(); + if (!caps) { + perror("cap_get_proc"); + return -1; + } + + /* Drop all capabilities */ + if (cap_clear(caps)) { + perror("cap_clear"); + goto out; + } + + cap_set_flag(caps, CAP_EFFECTIVE, 2, cap_values, CAP_SET); + cap_set_flag(caps, CAP_PERMITTED, 2, cap_values, CAP_SET); + + cap = (struct libcap *) caps; + + /* 40 -> CAP_CHECKPOINT_RESTORE */ + cap->data[1].effective |= 1 << (40 - 32); + cap->data[1].permitted |= 1 << (40 - 32); + + if (cap_set_proc(caps)) { + perror("cap_set_proc"); + goto out; + } + ret = 0; +out: + if (cap_free(caps)) + perror("cap_free"); + return ret; +} + +TEST(clone3_cap_checkpoint_restore) +{ + pid_t pid; + int status; + int ret = 0; + pid_t set_tid[1]; + + test_clone3_supported(); + + EXPECT_EQ(getuid(), 0) + XFAIL(return, "Skipping all tests as non-root\n"); + + memset(&set_tid, 0, sizeof(set_tid)); + + /* Find the current active PID */ + pid = fork(); + if (pid == 0) { + TH_LOG("Child has PID %d", getpid()); + child_exit(EXIT_SUCCESS); + } + ASSERT_GT(waitpid(pid, &status, 0), 0) + TH_LOG("Waiting for child %d failed", pid); + + /* After the child has finished, its PID should be free. 
*/ + set_tid[0] = pid; + + ASSERT_EQ(set_capability(), 0) + TH_LOG("Could not set CAP_CHECKPOINT_RESTORE"); + + ASSERT_EQ(prctl(PR_SET_KEEPCAPS, 1, 0, 0, 0), 0); + + EXPECT_EQ(setgid(65534), 0) + TH_LOG("Failed to setgid(65534)"); + ASSERT_EQ(setuid(65534), 0); + + set_tid[0] = pid; + /* This would fail without CAP_CHECKPOINT_RESTORE */ + ASSERT_EQ(test_clone3_set_tid(_metadata, set_tid, 1), -EPERM); + ASSERT_EQ(set_capability(), 0) + TH_LOG("Could not set CAP_CHECKPOINT_RESTORE"); + /* This should work as we have CAP_CHECKPOINT_RESTORE as non-root */ + ASSERT_EQ(test_clone3_set_tid(_metadata, set_tid, 1), 0); +} + +TEST_HARNESS_MAIN diff --git a/tools/testing/selftests/futex/functional/.gitignore b/tools/testing/selftests/futex/functional/.gitignore index 0efcd494daab..03a4bedcebc2 100644 --- a/tools/testing/selftests/futex/functional/.gitignore +++ b/tools/testing/selftests/futex/functional/.gitignore @@ -6,3 +6,4 @@ futex_wait_private_mapped_file futex_wait_timeout futex_wait_uninitialized_heap futex_wait_wouldblock +futex_wait_multiple diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile index 23207829ec75..26562f2d792d 100644 --- a/tools/testing/selftests/futex/functional/Makefile +++ b/tools/testing/selftests/futex/functional/Makefile @@ -14,7 +14,8 @@ TEST_GEN_FILES := \ futex_requeue_pi_signal_restart \ futex_requeue_pi_mismatched_ops \ futex_wait_uninitialized_heap \ - futex_wait_private_mapped_file + futex_wait_private_mapped_file \ + futex_wait_multiple TEST_PROGS := run.sh diff --git a/tools/testing/selftests/futex/functional/futex_wait_multiple.c b/tools/testing/selftests/futex/functional/futex_wait_multiple.c new file mode 100644 index 000000000000..b48422e79f42 --- /dev/null +++ b/tools/testing/selftests/futex/functional/futex_wait_multiple.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/****************************************************************************** + * + * Copyright © Collabora, Ltd., 2019 + * + * DESCRIPTION + * Test basic semantics of FUTEX_WAIT_MULTIPLE + * + * AUTHOR + * Gabriel Krisman Bertazi + * + * HISTORY + * 2019-Dec-13: Initial version by Krisman + * + *****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include "futextest.h" +#include "logging.h" + +#define TEST_NAME "futex-wait-multiple" +#define timeout_ns 100000 +#define MAX_COUNT 128 +#define WAKE_WAIT_US 3000000 + +int ret = RET_PASS; +char *progname; +futex_t f[MAX_COUNT] = {0}; +struct futex_wait_block fwb[MAX_COUNT]; + +void usage(char *prog) +{ + printf("Usage: %s\n", prog); + printf(" -c Use color\n"); + printf(" -h Display this help message\n"); + printf(" -v L Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n", + VQUIET, VCRITICAL, VINFO); +} + +void test_count_overflow(void) +{ + futex_t f = FUTEX_INITIALIZER; + struct futex_wait_block fwb[MAX_COUNT+1]; + int res, i; + + ksft_print_msg("%s: Test a too big number of futexes\n", progname); + + for (i = 0; i < MAX_COUNT+1; i++) { + fwb[i].uaddr = &f; + fwb[i].val = f; + fwb[i].bitset = 0; + } + + res = futex_wait_multiple(fwb, MAX_COUNT+1, NULL, FUTEX_PRIVATE_FLAG); + +#ifdef __ILP32__ + if (res != -1 || errno != ENOSYS) { + ksft_test_result_fail("futex_wait_multiple returned %d\n", + res < 0 ? 
errno : res); + ret = RET_FAIL; + } else { + ksft_test_result_skip("futex_wait_multiple not supported at x32\n"); + } +#else + if (res != -1 || errno != EINVAL) { + ksft_test_result_fail("futex_wait_multiple returned %d\n", + res < 0 ? errno : res); + ret = RET_FAIL; + } else { + ksft_test_result_pass("futex_wait_multiple count overflow succeed\n"); + } + +#endif /* __ILP32__ */ +} + +void *waiterfn(void *arg) +{ + int res; + + res = futex_wait_multiple(fwb, MAX_COUNT, NULL, FUTEX_PRIVATE_FLAG); + +#ifdef __ILP32__ + if (res != -1 || errno != ENOSYS) { + ksft_test_result_fail("futex_wait_multiple returned %d\n", + res < 0 ? errno : res); + ret = RET_FAIL; + } else { + ksft_test_result_skip("futex_wait_multiple not supported at x32\n"); + } +#else + if (res < 0) + ksft_print_msg("waiter failed %d\n", res); + + info("futex_wait_multiple: Got hint futex %d was freed\n", res); +#endif /* __ILP32__ */ + + return NULL; +} + +void test_fwb_wakeup(void) +{ + int res, i; + pthread_t waiter; + + ksft_print_msg("%s: Test wake up in a list of futex\n", progname); + + for (i = 0; i < MAX_COUNT; i++) { + fwb[i].uaddr = &f[i]; + fwb[i].val = f[i]; + fwb[i].bitset = 0xffffffff; + } + + res = pthread_create(&waiter, NULL, waiterfn, NULL); + if (res) { + ksft_test_result_fail("Creating waiting thread failed"); + ksft_exit_fail(); + } + + usleep(WAKE_WAIT_US); + res = futex_wake(&(f[MAX_COUNT-1]), 1, FUTEX_PRIVATE_FLAG); + if (res != 1) { + ksft_test_result_fail("Failed to wake thread res=%d\n", res); + ksft_exit_fail(); + } + + pthread_join(waiter, NULL); + ksft_test_result_pass("%s succeed\n", __func__); +} + +int main(int argc, char *argv[]) +{ + int c; + + while ((c = getopt(argc, argv, "cht:v:")) != -1) { + switch (c) { + case 'c': + log_color(1); + break; + case 'h': + usage(basename(argv[0])); + exit(0); + case 'v': + log_verbosity(atoi(optarg)); + break; + default: + usage(basename(argv[0])); + exit(1); + } + } + + progname = basename(argv[0]); + + ksft_print_header(); + ksft_set_plan(2); + + test_count_overflow(); + +#ifdef __ILP32__ + // if it's a 32x binary, there's no futex to wakeup + ksft_test_result_skip("futex_wait_multiple not supported at x32\n"); +#else + test_fwb_wakeup(); +#endif /* __ILP32__ */ + + ksft_print_cnts(); + return ret; +} diff --git a/tools/testing/selftests/futex/functional/futex_wait_timeout.c b/tools/testing/selftests/futex/functional/futex_wait_timeout.c index ee55e6d389a3..2a63e1c2cfb6 100644 --- a/tools/testing/selftests/futex/functional/futex_wait_timeout.c +++ b/tools/testing/selftests/futex/functional/futex_wait_timeout.c @@ -11,6 +11,7 @@ * * HISTORY * 2009-Nov-6: Initial version by Darren Hart + * 2019-Dec-13: Add WAIT_MULTIPLE test by Krisman * *****************************************************************************/ @@ -41,6 +42,8 @@ int main(int argc, char *argv[]) { futex_t f1 = FUTEX_INITIALIZER; struct timespec to; + time_t secs; + struct futex_wait_block fwb = {&f1, f1, 0}; int res, ret = RET_PASS; int c; @@ -65,7 +68,7 @@ int main(int argc, char *argv[]) } ksft_print_header(); - ksft_set_plan(1); + ksft_set_plan(2); ksft_print_msg("%s: Block on a futex and wait for timeout\n", basename(argv[0])); ksft_print_msg("\tArguments: timeout=%ldns\n", timeout_ns); @@ -79,8 +82,39 @@ int main(int argc, char *argv[]) if (!res || errno != ETIMEDOUT) { fail("futex_wait returned %d\n", ret < 0 ? 
errno : ret); ret = RET_FAIL; + } else + ksft_test_result_pass("futex_wait timeout succeeds\n"); + + info("Calling futex_wait_multiple on f1: %u @ %p\n", f1, &f1); + + /* Setup absolute time */ + ret = clock_gettime(CLOCK_REALTIME, &to); + secs = (to.tv_nsec + timeout_ns) / 1000000000; + to.tv_nsec = ((int64_t)to.tv_nsec + timeout_ns) % 1000000000; + to.tv_sec += secs; + info("to.tv_sec = %ld\n", to.tv_sec); + info("to.tv_nsec = %ld\n", to.tv_nsec); + + res = futex_wait_multiple(&fwb, 1, &to, + FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME); + +#ifdef __ILP32__ + if (res == -1 && errno == ENOSYS) { + ksft_test_result_skip("futex_wait_multiple not supported at x32\n"); + } else { + ksft_test_result_fail("futex_wait_multiple returned %d\n", + res < 0 ? errno : res); + ret = RET_FAIL; } +#else + if (!res || errno != ETIMEDOUT) { + ksft_test_result_fail("futex_wait_multiple returned %d\n", + res < 0 ? errno : res); + ret = RET_FAIL; + } else + ksft_test_result_pass("futex_wait_multiple timeout succeeds\n"); +#endif /* __ILP32__ */ - print_result(TEST_NAME, ret); + ksft_print_cnts(); return ret; } diff --git a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c index 0ae390ff8164..bcbac042992d 100644 --- a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c +++ b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c @@ -12,6 +12,7 @@ * * HISTORY * 2009-Nov-14: Initial version by Gowrishankar + * 2019-Dec-13: Add WAIT_MULTIPLE test by Krisman * *****************************************************************************/ @@ -40,6 +41,7 @@ int main(int argc, char *argv[]) { struct timespec to = {.tv_sec = 0, .tv_nsec = timeout_ns}; futex_t f1 = FUTEX_INITIALIZER; + struct futex_wait_block fwb = {&f1, f1+1, 0}; int res, ret = RET_PASS; int c; @@ -61,7 +63,7 @@ int main(int argc, char *argv[]) } ksft_print_header(); - ksft_set_plan(1); + ksft_set_plan(2); ksft_print_msg("%s: Test the unexpected futex value in FUTEX_WAIT\n", basename(argv[0])); @@ -71,8 +73,30 @@ int main(int argc, char *argv[]) fail("futex_wait returned: %d %s\n", res ? errno : res, res ? strerror(errno) : ""); ret = RET_FAIL; + } else + ksft_test_result_pass("futex_wait wouldblock succeeds\n"); + + info("Calling futex_wait_multiple on f1: %u @ %p with val=%u\n", + f1, &f1, f1+1); + res = futex_wait_multiple(&fwb, 1, NULL, FUTEX_PRIVATE_FLAG); + +#ifdef __ILP32__ + if (res != -1 || errno != ENOSYS) { + ksft_test_result_fail("futex_wait_multiple returned %d\n", + res < 0 ? errno : res); + ret = RET_FAIL; + } else { + ksft_test_result_skip("futex_wait_multiple not supported at x32\n"); + } +#else + if (!res || errno != EWOULDBLOCK) { + ksft_test_result_fail("futex_wait_multiple returned %d\n", + res < 0 ? 
errno : res); + ret = RET_FAIL; } + ksft_test_result_pass("futex_wait_multiple wouldblock succeeds\n"); +#endif /* __ILP32__ */ - print_result(TEST_NAME, ret); + ksft_print_cnts(); return ret; } diff --git a/tools/testing/selftests/futex/functional/run.sh b/tools/testing/selftests/futex/functional/run.sh index 1acb6ace1680..a8be94f28ff7 100755 --- a/tools/testing/selftests/futex/functional/run.sh +++ b/tools/testing/selftests/futex/functional/run.sh @@ -73,3 +73,6 @@ echo echo ./futex_wait_uninitialized_heap $COLOR ./futex_wait_private_mapped_file $COLOR + +echo +./futex_wait_multiple $COLOR diff --git a/tools/testing/selftests/futex/include/futextest.h b/tools/testing/selftests/futex/include/futextest.h index ddbcfc9b7bac..bb103bef4557 100644 --- a/tools/testing/selftests/futex/include/futextest.h +++ b/tools/testing/selftests/futex/include/futextest.h @@ -38,6 +38,14 @@ typedef volatile u_int32_t futex_t; #ifndef FUTEX_CMP_REQUEUE_PI #define FUTEX_CMP_REQUEUE_PI 12 #endif +#ifndef FUTEX_WAIT_MULTIPLE +#define FUTEX_WAIT_MULTIPLE 13 +struct futex_wait_block { + futex_t *uaddr; + futex_t val; + __u32 bitset; +}; +#endif #ifndef FUTEX_WAIT_REQUEUE_PI_PRIVATE #define FUTEX_WAIT_REQUEUE_PI_PRIVATE (FUTEX_WAIT_REQUEUE_PI | \ FUTEX_PRIVATE_FLAG) @@ -80,6 +88,20 @@ futex_wait(futex_t *uaddr, futex_t val, struct timespec *timeout, int opflags) return futex(uaddr, FUTEX_WAIT, val, timeout, NULL, 0, opflags); } +/** + * futex_wait_multiple() - block on several futexes with optional timeout + * @fwb: wait block user space address + * @count: number of entities at fwb + * @timeout: absolute timeout + */ +static inline int +futex_wait_multiple(struct futex_wait_block *fwb, int count, + struct timespec *timeout, int opflags) +{ + return futex(fwb, FUTEX_WAIT_MULTIPLE, count, timeout, NULL, 0, + opflags); +} + /** * futex_wake() - wake one or more tasks blocked on uaddr * @nr_wake: wake up to this many tasks diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile index d2796ea98c5a..6703c7906b71 100644 --- a/tools/testing/selftests/x86/Makefile +++ b/tools/testing/selftests/x86/Makefile @@ -13,7 +13,7 @@ CAN_BUILD_WITH_NOPIE := $(shell ./check_cc.sh $(CC) trivial_program.c -no-pie) TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt test_mremap_vdso \ check_initial_reg_state sigreturn iopl ioperm \ test_vdso test_vsyscall mov_ss_trap \ - syscall_arg_fault + syscall_arg_fault fsgsbase_restore TARGETS_C_32BIT_ONLY := entry_from_vm86 test_syscall_vdso unwind_vdso \ test_FCMOV test_FCOMI test_FISTTP \ vdso_restorer diff --git a/tools/testing/selftests/x86/fsgsbase.c b/tools/testing/selftests/x86/fsgsbase.c index 15a329da59fa..998319553523 100644 --- a/tools/testing/selftests/x86/fsgsbase.c +++ b/tools/testing/selftests/x86/fsgsbase.c @@ -285,7 +285,8 @@ static unsigned short load_gs(void) /* 32-bit set_thread_area */ long ret; asm volatile ("int $0x80" - : "=a" (ret) : "a" (243), "b" (low_desc) + : "=a" (ret), "+m" (*low_desc) + : "a" (243), "b" (low_desc) : "r8", "r9", "r10", "r11"); memcpy(&desc, low_desc, sizeof(desc)); munmap(low_desc, sizeof(desc)); @@ -489,11 +490,28 @@ static void test_ptrace_write_gsbase(void) * selector value is changed or not by the GSBASE write in * a ptracer. 
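For context, the "GSBASE write in a ptracer" referred to above is a write into the tracee's USER area; a hedged sketch of how a tracer might issue it (the helper name and the absence of error handling are illustrative only):

#include <stddef.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>

/* Write a new GSBASE into a stopped tracee via PTRACE_POKEUSER. */
static long poke_gsbase(pid_t tracee, unsigned long base)
{
	return ptrace(PTRACE_POKEUSER, tracee,
		      offsetof(struct user_regs_struct, gs_base), base);
}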
*/ - if (gs == 0 && base == 0xFF) { - printf("[OK]\tGS was reset as expected\n"); - } else { + if (gs != *shared_scratch) { nerrs++; - printf("[FAIL]\tGS=0x%lx, GSBASE=0x%lx (should be 0, 0xFF)\n", gs, base); + printf("[FAIL]\tGS changed to %lx\n", gs); + + /* + * On older kernels, poking a nonzero value into the + * base would zero the selector. On newer kernels, + * this behavior has changed -- poking the base + * changes only the base and, if FSGSBASE is not + * available, this may have no effect once the tracee + * is resumed. + */ + if (gs == 0) + printf("\tNote: this is expected behavior on older kernels.\n"); + } else if (have_fsgsbase && (base != 0xFF)) { + nerrs++; + printf("[FAIL]\tGSBASE changed to %lx\n", base); + } else { + printf("[OK]\tGS remained 0x%hx", *shared_scratch); + if (have_fsgsbase) + printf(" and GSBASE changed to 0xFF"); + printf("\n"); } } diff --git a/tools/testing/selftests/x86/fsgsbase_restore.c b/tools/testing/selftests/x86/fsgsbase_restore.c new file mode 100644 index 000000000000..6fffadc51579 --- /dev/null +++ b/tools/testing/selftests/x86/fsgsbase_restore.c @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * fsgsbase_restore.c, test ptrace vs fsgsbase + * Copyright (c) 2020 Andy Lutomirski + * + * This test case simulates a tracer redirecting tracee execution to + * a function and then restoring tracee state using PTRACE_GETREGS and + * PTRACE_SETREGS. This is similar to what gdb does when doing + * 'p func()'. The catch is that this test has the called function + * modify a segment register. This makes sure that ptrace correctly + * restores segment state when using PTRACE_SETREGS. + * + * This is not part of fsgsbase.c, because that test is 64-bit only. + */ + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define EXPECTED_VALUE 0x1337f00d + +#ifdef __x86_64__ +# define SEG "%gs" +#else +# define SEG "%fs" +#endif + +static unsigned int dereference_seg_base(void) +{ + int ret; + asm volatile ("mov %" SEG ":(0), %0" : "=rm" (ret)); + return ret; +} + +static void init_seg(void) +{ + unsigned int *target = mmap( + NULL, sizeof(unsigned int), + PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0); + if (target == MAP_FAILED) + err(1, "mmap"); + + *target = EXPECTED_VALUE; + + printf("\tsegment base address = 0x%lx\n", (unsigned long)target); + + struct user_desc desc = { + .entry_number = 0, + .base_addr = (unsigned int)(uintptr_t)target, + .limit = sizeof(unsigned int) - 1, + .seg_32bit = 1, + .contents = 0, /* Data, grow-up */ + .read_exec_only = 0, + .limit_in_pages = 0, + .seg_not_present = 0, + .useable = 0 + }; + if (syscall(SYS_modify_ldt, 1, &desc, sizeof(desc)) == 0) { + printf("\tusing LDT slot 0\n"); + asm volatile ("mov %0, %" SEG :: "rm" ((unsigned short)0x7)); + } else { + /* No modify_ldt for us (configured out, perhaps) */ + + struct user_desc *low_desc = mmap( + NULL, sizeof(desc), + PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0); + memcpy(low_desc, &desc, sizeof(desc)); + + low_desc->entry_number = -1; + + /* 32-bit set_thread_area */ + long ret; + asm volatile ("int $0x80" + : "=a" (ret), "+m" (*low_desc) + : "a" (243), "b" (low_desc) +#ifdef __x86_64__ + : "r8", "r9", "r10", "r11" +#endif + ); + memcpy(&desc, low_desc, sizeof(desc)); + munmap(low_desc, sizeof(desc)); + + if (ret != 0) { + printf("[NOTE]\tcould not create a segment -- 
can't test anything\n"); + exit(0); + } + printf("\tusing GDT slot %d\n", desc.entry_number); + + unsigned short sel = (unsigned short)((desc.entry_number << 3) | 0x3); + asm volatile ("mov %0, %" SEG :: "rm" (sel)); + } +} + +static void tracee_zap_segment(void) +{ + /* + * The tracer will redirect execution here. This is meant to + * work like gdb's 'p func()' feature. The tricky bit is that + * we modify a segment register in order to make sure that ptrace + * can correctly restore segment registers. + */ + printf("\tTracee: in tracee_zap_segment()\n"); + + /* + * Write a nonzero selector with base zero to the segment register. + * Using a null selector would defeat the test on AMD pre-Zen2 + * CPUs, as such CPUs don't clear the base when loading a null + * selector. + */ + unsigned short sel; + asm volatile ("mov %%ss, %0\n\t" + "mov %0, %" SEG + : "=rm" (sel)); + + pid_t pid = getpid(), tid = syscall(SYS_gettid); + + printf("\tTracee is going back to sleep\n"); + syscall(SYS_tgkill, pid, tid, SIGSTOP); + + /* Should not get here. */ + while (true) { + printf("[FAIL]\tTracee hit unreachable code\n"); + pause(); + } +} + +int main() +{ + printf("\tSetting up a segment\n"); + init_seg(); + + unsigned int val = dereference_seg_base(); + if (val != EXPECTED_VALUE) { + printf("[FAIL]\tseg[0] == %x; should be %x\n", val, EXPECTED_VALUE); + return 1; + } + printf("[OK]\tThe segment points to the right place.\n"); + + pid_t chld = fork(); + if (chld < 0) + err(1, "fork"); + + if (chld == 0) { + prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0, 0); + + if (ptrace(PTRACE_TRACEME, 0, 0, 0) != 0) + err(1, "PTRACE_TRACEME"); + + pid_t pid = getpid(), tid = syscall(SYS_gettid); + + printf("\tTracee will take a nap until signaled\n"); + syscall(SYS_tgkill, pid, tid, SIGSTOP); + + printf("\tTracee was resumed. Will re-check segment.\n"); + + val = dereference_seg_base(); + if (val != EXPECTED_VALUE) { + printf("[FAIL]\tseg[0] == %x; should be %x\n", val, EXPECTED_VALUE); + exit(1); + } + + printf("[OK]\tThe segment points to the right place.\n"); + exit(0); + } + + int status; + + /* Wait for SIGSTOP. */ + if (waitpid(chld, &status, 0) != chld || !WIFSTOPPED(status)) + err(1, "waitpid"); + + struct user_regs_struct regs; + + if (ptrace(PTRACE_GETREGS, chld, NULL, ®s) != 0) + err(1, "PTRACE_GETREGS"); + +#ifdef __x86_64__ + printf("\tChild GS=0x%lx, GSBASE=0x%lx\n", (unsigned long)regs.gs, (unsigned long)regs.gs_base); +#else + printf("\tChild FS=0x%lx\n", (unsigned long)regs.xfs); +#endif + + struct user_regs_struct regs2 = regs; +#ifdef __x86_64__ + regs2.rip = (unsigned long)tracee_zap_segment; + regs2.rsp -= 128; /* Don't clobber the redzone. */ +#else + regs2.eip = (unsigned long)tracee_zap_segment; +#endif + + printf("\tTracer: redirecting tracee to tracee_zap_segment()\n"); + if (ptrace(PTRACE_SETREGS, chld, NULL, ®s2) != 0) + err(1, "PTRACE_GETREGS"); + if (ptrace(PTRACE_CONT, chld, NULL, NULL) != 0) + err(1, "PTRACE_GETREGS"); + + /* Wait for SIGSTOP. */ + if (waitpid(chld, &status, 0) != chld || !WIFSTOPPED(status)) + err(1, "waitpid"); + + printf("\tTracer: restoring tracee state\n"); + if (ptrace(PTRACE_SETREGS, chld, NULL, ®s) != 0) + err(1, "PTRACE_GETREGS"); + if (ptrace(PTRACE_DETACH, chld, NULL, NULL) != 0) + err(1, "PTRACE_GETREGS"); + + /* Wait for SIGSTOP. 
*/ + if (waitpid(chld, &status, 0) != chld) + err(1, "waitpid"); + + if (WIFSIGNALED(status)) { + printf("[FAIL]\tTracee crashed\n"); + return 1; + } + + if (!WIFEXITED(status)) { + printf("[FAIL]\tTracee stopped for an unexpected reason: %d\n", status); + return 1; + } + + int exitcode = WEXITSTATUS(status); + if (exitcode != 0) { + printf("[FAIL]\tTracee reported failure\n"); + return 1; + } + + printf("[OK]\tAll is well.\n"); + return 0; +} diff --git a/tools/testing/selftests/x86/syscall_arg_fault.c b/tools/testing/selftests/x86/syscall_arg_fault.c index 5b7abebbcbb9..bff474b5efc6 100644 --- a/tools/testing/selftests/x86/syscall_arg_fault.c +++ b/tools/testing/selftests/x86/syscall_arg_fault.c @@ -53,6 +53,7 @@ static void sigsegv_or_sigbus(int sig, siginfo_t *info, void *ctx_void) if (ax != -EFAULT && ax != -ENOSYS) { printf("[FAIL]\tAX had the wrong value: 0x%lx\n", (unsigned long)ax); + printf("\tIP = 0x%lx\n", (unsigned long)ctx->uc_mcontext.gregs[REG_IP]); n_errs++; } else { printf("[OK]\tSeems okay\n"); @@ -207,5 +208,30 @@ int main() } set_eflags(get_eflags() & ~X86_EFLAGS_TF); +#ifdef __x86_64__ + printf("[RUN]\tSYSENTER with TF, invalid state, and GSBASE < 0\n"); + + if (sigsetjmp(jmpbuf, 1) == 0) { + sigtrap_consecutive_syscalls = 0; + + asm volatile ("wrgsbase %%rax\n\t" + :: "a" (0xffffffffffff0000UL)); + + set_eflags(get_eflags() | X86_EFLAGS_TF); + asm volatile ( + "movl $-1, %%eax\n\t" + "movl $-1, %%ebx\n\t" + "movl $-1, %%ecx\n\t" + "movl $-1, %%edx\n\t" + "movl $-1, %%esi\n\t" + "movl $-1, %%edi\n\t" + "movl $-1, %%ebp\n\t" + "movl $-1, %%esp\n\t" + "sysenter" + : : : "memory", "flags"); + } + set_eflags(get_eflags() & ~X86_EFLAGS_TF); +#endif + return 0; } diff --git a/usr/Kconfig b/usr/Kconfig index 96afb03b65f9..2599bc21c1b2 100644 --- a/usr/Kconfig +++ b/usr/Kconfig @@ -100,6 +100,15 @@ config RD_LZ4 Support loading of a LZ4 encoded initial ramdisk or cpio buffer If unsure, say N. +config RD_ZSTD + bool "Support initial ramdisk/ramfs compressed using ZSTD" + default y + depends on BLK_DEV_INITRD + select DECOMPRESS_ZSTD + help + Support loading of a ZSTD encoded initial ramdisk or cpio buffer. + If unsure, say N. + choice prompt "Built-in initramfs compression mode" depends on INITRAMFS_SOURCE != "" @@ -196,6 +205,17 @@ config INITRAMFS_COMPRESSION_LZ4 If you choose this, keep in mind that most distros don't provide lz4 by default which could cause a build failure. +config INITRAMFS_COMPRESSION_ZSTD + bool "ZSTD" + depends on RD_ZSTD + help + ZSTD is a compression algorithm targeting intermediate compression + with fast decompression speed. It will compress better than GZIP and + decompress around the same speed as LZO, but slower than LZ4. + + If you choose this, keep in mind that you may need to install the zstd + tool to be able to compress the initram. + config INITRAMFS_COMPRESSION_NONE bool "None" help diff --git a/usr/Makefile b/usr/Makefile index c12e6b15ce72..b1a81a40eab1 100644 --- a/usr/Makefile +++ b/usr/Makefile @@ -15,6 +15,7 @@ compress-$(CONFIG_INITRAMFS_COMPRESSION_LZMA) := lzma compress-$(CONFIG_INITRAMFS_COMPRESSION_XZ) := xzmisc compress-$(CONFIG_INITRAMFS_COMPRESSION_LZO) := lzo compress-$(CONFIG_INITRAMFS_COMPRESSION_LZ4) := lz4 +compress-$(CONFIG_INITRAMFS_COMPRESSION_ZSTD) := zstd obj-$(CONFIG_BLK_DEV_INITRD) := initramfs_data.o
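With CONFIG_RD_ZSTD and CONFIG_INITRAMFS_COMPRESSION_ZSTD enabled, the initramfs goes through the cmd_zstd rule above (zstd -19 followed by the appended uncompressed size). As a purely illustrative stand-alone check, the following reads the first two bytes of an image and compares them against the zstd magic (0x28 0xB5) that the in-kernel compressed_formats table matches:

#include <stdio.h>

int main(int argc, char **argv)
{
	unsigned char magic[2];
	FILE *f;

	if (argc < 2)
		return 2;
	f = fopen(argv[1], "rb");
	if (!f)
		return 2;
	if (fread(magic, 1, sizeof(magic), f) != sizeof(magic)) {
		fclose(f);
		return 2;
	}
	printf("%s: %s\n", argv[1],
	       (magic[0] == 0x28 && magic[1] == 0xb5) ? "zstd" : "not zstd");
	fclose(f);
	return 0;
}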